Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_video.c2
-rw-r--r--drivers/acpi/acpi_watchdog.c2
-rw-r--r--drivers/acpi/acpica/actables.h6
-rw-r--r--drivers/acpi/acpica/tbfadt.c14
-rw-r--r--drivers/acpi/acpica/tbutils.c85
-rw-r--r--drivers/acpi/acpica/tbxface.c130
-rw-r--r--drivers/acpi/arm64/iort.c607
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c12
-rw-r--r--drivers/acpi/glue.c13
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/nfit/core.c3
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osl.c17
-rw-r--r--drivers/acpi/pci_mcfg.c190
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_core.c8
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/processor_throttling.c2
-rw-r--r--drivers/acpi/resource.c57
-rw-r--r--drivers/acpi/scan.c37
-rw-r--r--drivers/acpi/spcr.c8
-rw-r--r--drivers/acpi/sysfs.c56
-rw-r--r--drivers/acpi/tables.c17
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/atm/adummy.c2
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/atm/uPD98402.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/auxdisplay/Kconfig6
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/cacheinfo.c5
-rw-r--r--drivers/base/core.c7
-rw-r--r--drivers/base/dd.c13
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/wakeup.c4
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/cryptoloop.c2
-rw-r--r--drivers/block/hd.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/sx8.c2
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/zram/zram_drv.c19
-rw-r--r--drivers/bluetooth/Makefile2
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/bus/Kconfig16
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/arm-cci.c12
-rw-r--r--drivers/bus/arm-ccn.c7
-rw-r--r--drivers/bus/da8xx-mstpri.c267
-rw-r--r--drivers/bus/tegra-gmi.c284
-rw-r--r--drivers/bus/vexpress-config.c7
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/agp/alpha-agp.c3
-rw-r--r--drivers/char/agp/compat_ioctl.c2
-rw-r--r--drivers/char/agp/frontend.c2
-rw-r--r--drivers/char/applicom.c2
-rw-r--r--drivers/char/bfin-otp.c2
-rw-r--r--drivers/char/ds1620.c2
-rw-r--r--drivers/char/dtlk.c2
-rw-r--r--drivers/char/generic_nvram.c2
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c26
-rw-r--r--drivers/char/hw_random/core.c5
-rw-r--r--drivers/char/hw_random/meson-rng.c2
-rw-r--r--drivers/char/hw_random/msm-rng.c4
-rw-r--r--drivers/char/hw_random/omap-rng.c162
-rw-r--r--drivers/char/hw_random/pic32-rng.c3
-rw-r--r--drivers/char/hw_random/pseries-rng.c5
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c10
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c190
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c37
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mbcs.c2
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/mmtimer.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/mwave/3780i.c2
-rw-r--r--drivers/char/mwave/mwavedd.h2
-rw-r--r--drivers/char/nsc_gpio.c2
-rw-r--r--drivers/char/nwbutton.c2
-rw-r--r--drivers/char/nwflash.c2
-rw-r--r--drivers/char/pc8736x_gpio.c2
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/ppdev.c13
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/scx200_gpio.c2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/toshiba.c2
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/Makefile14
-rw-r--r--drivers/char/tpm/tpm-chip.c42
-rw-r--r--drivers/char/tpm/tpm-interface.c110
-rw-r--r--drivers/char/tpm/tpm-sysfs.c7
-rw-r--r--drivers/char/tpm/tpm.h41
-rw-r--r--drivers/char/tpm/tpm2-cmd.c2
-rw-r--r--drivers/char/tpm/tpm_acpi.c46
-rw-r--r--drivers/char/tpm/tpm_crb.c173
-rw-r--r--drivers/char/tpm/tpm_eventlog.c230
-rw-r--r--drivers/char/tpm/tpm_eventlog.h22
-rw-r--r--drivers/char/tpm/tpm_of.c48
-rw-r--r--drivers/char/tpm/tpm_tis.c11
-rw-r--r--drivers/char/tpm/tpm_tis_core.c64
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c85
-rw-r--r--drivers/char/tpm/xen-tpmfront.c1
-rw-r--r--drivers/char/virtio_console.c14
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/clk/clk-stm32f4.c4
-rw-r--r--drivers/clk/clkdev.c8
-rw-r--r--drivers/clk/imx/clk-imx31.c52
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c2
-rw-r--r--drivers/clk/renesas/clk-mstp.c27
-rw-r--r--drivers/clocksource/Kconfig20
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/acpi_pm.c14
-rw-r--r--drivers/clocksource/arc_timer.c336
-rw-r--r--drivers/clocksource/arm_arch_timer.c6
-rw-r--r--drivers/clocksource/arm_global_timer.c4
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c4
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c2
-rw-r--r--drivers/clocksource/dummy_timer.c2
-rw-r--r--drivers/clocksource/dw_apb_timer.c8
-rw-r--r--drivers/clocksource/em_sti.c12
-rw-r--r--drivers/clocksource/exynos_mct.c9
-rw-r--r--drivers/clocksource/h8300_timer16.c2
-rw-r--r--drivers/clocksource/h8300_tpu.c2
-rw-r--r--drivers/clocksource/i8253.c4
-rw-r--r--drivers/clocksource/jcore-pit.c4
-rw-r--r--drivers/clocksource/metag_generic.c4
-rw-r--r--drivers/clocksource/mips-gic-timer.c6
-rw-r--r--drivers/clocksource/mmio.c18
-rw-r--r--drivers/clocksource/moxart_timer.c22
-rw-r--r--drivers/clocksource/mxs_timer.c2
-rw-r--r--drivers/clocksource/pxa_timer.c11
-rw-r--r--drivers/clocksource/qcom-timer.c4
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c2
-rw-r--r--drivers/clocksource/scx200_hrt.c4
-rw-r--r--drivers/clocksource/sh_cmt.c2
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/time-armada-370-xp.c2
-rw-r--r--drivers/clocksource/time-pistachio.c4
-rw-r--r--drivers/clocksource/timer-atlas7.c4
-rw-r--r--drivers/clocksource/timer-atmel-pit.c2
-rw-r--r--drivers/clocksource/timer-atmel-st.c2
-rw-r--r--drivers/clocksource/timer-nps.c228
-rw-r--r--drivers/clocksource/timer-prima2.c2
-rw-r--r--drivers/clocksource/timer-sun5i.c2
-rw-r--r--drivers/clocksource/timer-ti-32k.c4
-rw-r--r--drivers/clocksource/vt8500_timer.c4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c53
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c2
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c3
-rw-r--r--drivers/crypto/atmel-aes-regs.h4
-rw-r--r--drivers/crypto/atmel-aes.c189
-rw-r--r--drivers/crypto/caam/Kconfig11
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c1505
-rw-r--r--drivers/crypto/caam/caamalg_desc.c1306
-rw-r--r--drivers/crypto/caam/caamalg_desc.h97
-rw-r--r--drivers/crypto/caam/caamhash.c227
-rw-r--r--drivers/crypto/caam/caampkc.c4
-rw-r--r--drivers/crypto/caam/caamrng.c10
-rw-r--r--drivers/crypto/caam/ctrl.c75
-rw-r--r--drivers/crypto/caam/desc.h22
-rw-r--r--drivers/crypto/caam/desc_constr.h133
-rw-r--r--drivers/crypto/caam/error.c5
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c27
-rw-r--r--drivers/crypto/caam/key_gen.c62
-rw-r--r--drivers/crypto/caam/key_gen.h6
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h6
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c4
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c30
-rw-r--r--drivers/crypto/ccp/ccp-dev.c6
-rw-r--r--drivers/crypto/ccp/ccp-dev.h45
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c2001
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h103
-rw-r--r--drivers/crypto/chelsio/chcr_core.c8
-rw-r--r--drivers/crypto/chelsio/chcr_core.h18
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h115
-rw-r--r--drivers/crypto/marvell/cesa.c4
-rw-r--r--drivers/crypto/marvell/cesa.h8
-rw-r--r--drivers/crypto/marvell/cipher.c8
-rw-r--r--drivers/crypto/marvell/hash.c99
-rw-r--r--drivers/crypto/marvell/tdma.c42
-rw-r--r--drivers/crypto/mv_cesa.c4
-rw-r--r--drivers/crypto/nx/nx.c1
-rw-r--r--drivers/crypto/sahara.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/crypto/virtio/Kconfig10
-rw-r--r--drivers/crypto/virtio/Makefile5
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c540
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h128
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c476
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c264
-rw-r--r--drivers/crypto/vmx/Makefile12
-rw-r--r--drivers/dax/dax.c97
-rw-r--r--drivers/dax/pmem.c3
-rw-r--r--drivers/devfreq/devfreq.c15
-rw-r--r--drivers/devfreq/exynos-bus.c2
-rw-r--r--drivers/dio/dio.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/at_hdmac.c3
-rw-r--r--drivers/dma/at_xdmac.c5
-rw-r--r--drivers/dma/dmatest.c78
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c18
-rw-r--r--drivers/dma/dw/regs.h3
-rw-r--r--drivers/dma/edma.c3
-rw-r--r--drivers/dma/fsl_raid.c1
-rw-r--r--drivers/dma/hsu/pci.c8
-rw-r--r--drivers/dma/img-mdc-dma.c9
-rw-r--r--drivers/dma/imx-sdma.c13
-rw-r--r--drivers/dma/ioat/dma.c17
-rw-r--r--drivers/dma/ioat/hw.h2
-rw-r--r--drivers/dma/ioat/init.c36
-rw-r--r--drivers/dma/k3dma.c3
-rw-r--r--drivers/dma/mic_x100_dma.c2
-rw-r--r--drivers/dma/mv_xor.c190
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/nbpfaxi.c38
-rw-r--r--drivers/dma/omap-dma.c218
-rw-r--r--drivers/dma/pch_dma.c5
-rw-r--r--drivers/dma/pl330.c34
-rw-r--r--drivers/dma/pxa_dma.c28
-rw-r--r--drivers/dma/qcom/hidma.c173
-rw-r--r--drivers/dma/qcom/hidma.h9
-rw-r--r--drivers/dma/qcom/hidma_dbg.c4
-rw-r--r--drivers/dma/qcom/hidma_ll.c176
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c11
-rw-r--r--drivers/dma/s3c24xx-dma.c5
-rw-r--r--drivers/dma/sh/rcar-dmac.c8
-rw-r--r--drivers/dma/sh/usb-dmac.c3
-rw-r--r--drivers/dma/sirf-dma.c4
-rw-r--r--drivers/dma/stm32-dma.c23
-rw-r--r--drivers/dma/ti-dma-crossbar.c2
-rw-r--r--drivers/dma/zx296702_dma.c3
-rw-r--r--drivers/edac/altera_edac.c1
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/amd8111_edac.c1
-rw-r--r--drivers/edac/amd8131_edac.c1
-rw-r--r--drivers/edac/cell_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c1
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_device.c79
-rw-r--r--drivers/edac/edac_device.h (renamed from drivers/edac/edac_core.h)309
-rw-r--r--drivers/edac/edac_device_sysfs.c4
-rw-r--r--drivers/edac/edac_mc.c86
-rw-r--r--drivers/edac/edac_mc.h245
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/edac_module.c2
-rw-r--r--drivers/edac/edac_module.h4
-rw-r--r--drivers/edac/edac_pci.c84
-rw-r--r--drivers/edac/edac_pci.h271
-rw-r--r--drivers/edac/edac_pci_sysfs.c13
-rw-r--r--drivers/edac/fsl_ddr_edac.c1
-rw-r--r--drivers/edac/ghes_edac.c2
-rw-r--r--drivers/edac/highbank_l2_edac.c1
-rw-r--r--drivers/edac/highbank_mc_edac.c1
-rw-r--r--drivers/edac/i3000_edac.c2
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5100_edac.c1
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82443bxgx_edac.c2
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/ie31200_edac.c2
-rw-r--r--drivers/edac/layerscape_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c1
-rw-r--r--drivers/edac/mv64x60_edac.c1
-rw-r--r--drivers/edac/octeon_edac-l2c.c1
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/edac/octeon_edac-pc.c1
-rw-r--r--drivers/edac/octeon_edac-pci.c1
-rw-r--r--drivers/edac/pasemi_edac.c2
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/edac/skx_edac.c2
-rw-r--r--drivers/edac/synopsys_edac.c2
-rw-r--r--drivers/edac/tile_edac.c2
-rw-r--r--drivers/edac/x38_edac.c2
-rw-r--r--drivers/edac/xgene_edac.c1
-rw-r--r--drivers/extcon/extcon-arizona.c8
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firmware/Kconfig27
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/arm_scpi.c286
-rw-r--r--drivers/firmware/dmi_scan.c4
-rw-r--r--drivers/firmware/efi/fake_mem.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/fdt.c87
-rw-r--r--drivers/firmware/efi/memmap.c38
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/firmware/psci_checker.c490
-rw-r--r--drivers/firmware/qcom_scm.c53
-rw-r--r--drivers/firmware/tegra/Kconfig25
-rw-r--r--drivers/firmware/tegra/Makefile2
-rw-r--r--drivers/firmware/tegra/bpmp.c868
-rw-r--r--drivers/firmware/tegra/ivc.c695
-rw-r--r--drivers/firmware/ti_sci.c1991
-rw-r--r--drivers/firmware/ti_sci.h492
-rw-r--r--drivers/gpio/gpio-mxs.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c3
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c935
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c278
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c221
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c1
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c5
-rw-r--r--drivers/gpu/drm/ast/ast_main.c7
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c6
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c5
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h7
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c1005
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c115
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c63
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c45
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c9
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c109
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c10
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c13
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c3
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c19
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c18
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c3
-rw-r--r--drivers/gpu/drm/radeon/si.c60
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c13
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c5
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c6
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/hid-core.c26
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-mf.c19
-rw-r--r--drivers/hid/hid-microsoft.c12
-rw-r--r--drivers/hid/hid-rmi.c975
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h8
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h12
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c38
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h3
-rw-r--r--drivers/hid/usbhid/hid-core.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c12
-rw-r--r--drivers/hid/usbhid/usbkbd.c3
-rw-r--r--drivers/hid/usbhid/usbmouse.c3
-rw-r--r--drivers/hid/wacom.h5
-rw-r--r--drivers/hid/wacom_sys.c147
-rw-r--r--drivers/hid/wacom_wac.c289
-rw-r--r--drivers/hid/wacom_wac.h37
-rw-r--r--drivers/hv/hv.c8
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c2
-rw-r--r--drivers/hwtracing/intel_th/sth.c11
-rw-r--r--drivers/hwtracing/stm/Kconfig11
-rw-r--r--drivers/hwtracing/stm/Makefile2
-rw-r--r--drivers/hwtracing/stm/core.c7
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c2
-rw-r--r--drivers/hwtracing/stm/ftrace.c87
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig25
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c218
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c46
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h8
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c54
-rw-r--r--drivers/i2c/busses/i2c-dln2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c123
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c652
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c504
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c50
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h21
-rw-r--r--drivers/i2c/busses/i2c-piix4.c22
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c32
-rw-r--r--drivers/i2c/busses/i2c-pxa.c26
-rw-r--r--drivers/i2c/busses/i2c-qup.c122
-rw-r--r--drivers/i2c/busses/i2c-rcar.c5
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c6
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c6
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c1
-rw-r--r--drivers/i2c/i2c-core.c207
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/i2c/i2c-smbus.c102
-rw-r--r--drivers/i2c/muxes/Kconfig11
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c18
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c222
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c33
-rw-r--r--drivers/ide/hpt366.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-io.c2
-rw-r--r--drivers/ide/ide-iops.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c12
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c13
-rw-r--r--drivers/iio/counter/104-quad-8.c13
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c25
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c5
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/agent.c1
-rw-r--r--drivers/infiniband/core/cache.c16
-rw-r--r--drivers/infiniband/core/cm.c72
-rw-r--r--drivers/infiniband/core/cma.c43
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/device.c5
-rw-r--r--drivers/infiniband/core/fmr_pool.c1
-rw-r--r--drivers/infiniband/core/iwcm.c21
-rw-r--r--drivers/infiniband/core/iwpm_msg.c1
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/mad.c46
-rw-r--r--drivers/infiniband/core/multicast.c7
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c21
-rw-r--r--drivers/infiniband/core/ucm.c7
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c231
-rw-r--r--drivers/infiniband/core/uverbs_main.c8
-rw-r--r--drivers/infiniband/core/verbs.c108
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_dbg.c20
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c3
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h9
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c9
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h3
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c110
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c3
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c211
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c156
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h144
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h8
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c31
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c40
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h38
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c22
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c193
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h127
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c11
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c60
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c44
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c18
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h12
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c60
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c209
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h16
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c13
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h44
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c46
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h66
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c1222
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h74
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c329
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_user.h53
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h37
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c377
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h11
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c671
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h25
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c61
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c166
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c273
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h20
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h102
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ucontext.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c57
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c289
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c259
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c33
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c10
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c58
-rw-r--r--drivers/infiniband/hw/mlx4/main.c63
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c13
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c25
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx5/main.c268
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c7
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h12
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c71
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c131
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c84
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c62
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c22
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c44
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c22
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_vnic.c22
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Kconfig7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Makefile3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h474
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c425
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h586
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c127
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c1211
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c304
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c334
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c972
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h131
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c579
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h436
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c63
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c22
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c20
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h141
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h112
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h96
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rvt.h81
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h132
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c28
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c5
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c31
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c48
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c22
-rw-r--r--drivers/input/input-compat.c2
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/joystick/walkera0701.c2
-rw-r--r--drivers/input/joystick/xpad.c13
-rw-r--r--drivers/input/keyboard/gpio_keys.c190
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c128
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c21
-rw-r--r--drivers/input/misc/Kconfig7
-rw-r--r--drivers/input/misc/adxl34x-i2c.c4
-rw-r--r--drivers/input/misc/arizona-haptics.c13
-rw-r--r--drivers/input/misc/atlas_btns.c2
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/da9063_onkey.c9
-rw-r--r--drivers/input/misc/drv260x.c119
-rw-r--r--drivers/input/misc/drv2665.c6
-rw-r--r--drivers/input/misc/drv2667.c4
-rw-r--r--drivers/input/misc/soc_button_array.c8
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c8
-rw-r--r--drivers/input/mouse/alps.c56
-rw-r--r--drivers/input/mouse/alps.h24
-rw-r--r--drivers/input/mouse/amimouse.c2
-rw-r--r--drivers/input/mouse/atarimouse.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c17
-rw-r--r--drivers/input/mouse/synaptics_i2c.c4
-rw-r--r--drivers/input/mouse/trackpoint.c2
-rw-r--r--drivers/input/rmi4/Kconfig43
-rw-r--r--drivers/input/rmi4/Makefile4
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c10
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h2
-rw-r--r--drivers/input/rmi4/rmi_bus.c12
-rw-r--r--drivers/input/rmi4/rmi_bus.h12
-rw-r--r--drivers/input/rmi4/rmi_driver.c420
-rw-r--r--drivers/input/rmi4/rmi_driver.h31
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/rmi4/rmi_f03.c250
-rw-r--r--drivers/input/rmi4/rmi_f11.c98
-rw-r--r--drivers/input/rmi4/rmi_f12.c146
-rw-r--r--drivers/input/rmi4/rmi_f30.c22
-rw-r--r--drivers/input/rmi4/rmi_f34.c516
-rw-r--r--drivers/input/rmi4/rmi_f34.h314
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c1372
-rw-r--r--drivers/input/rmi4/rmi_f54.c19
-rw-r--r--drivers/input/rmi4/rmi_f55.c131
-rw-r--r--drivers/input/rmi4/rmi_i2c.c74
-rw-r--r--drivers/input/rmi4/rmi_smbus.c447
-rw-r--r--drivers/input/rmi4/rmi_spi.c72
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h94
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/serio/q40kbd.c2
-rw-r--r--drivers/input/serio/serport.c2
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/gtco.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c4
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c83
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c51
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/silead.c29
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c14
-rw-r--r--drivers/iommu/amd_iommu_v2.c4
-rw-r--r--drivers/iommu/arm-smmu-v3.c104
-rw-r--r--drivers/iommu/arm-smmu.c177
-rw-r--r--drivers/iommu/dma-iommu.c24
-rw-r--r--drivers/iommu/dmar.c11
-rw-r--r--drivers/iommu/exynos-iommu.c293
-rw-r--r--drivers/iommu/intel-iommu.c42
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c5
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/iommu.c53
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/mtk_iommu.c85
-rw-r--r--drivers/iommu/mtk_iommu.h11
-rw-r--r--drivers/iommu/mtk_iommu_v1.c105
-rw-r--r--drivers/iommu/of_iommu.c39
-rw-r--r--drivers/iommu/s390-iommu.c1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c6
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c6
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c16
-rw-r--r--drivers/irqchip/irq-st.c2
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c32
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c2
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hisax/config.c16
-rw-r--r--drivers/isdn/hisax/hisax.h4
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_boot.c2
-rw-r--r--drivers/isdn/i4l/isdn_concap.c6
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c16
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c2
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/lguest/page_tables.c2
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/macintosh/via-pmu68k.c2
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c548
-rw-r--r--drivers/mailbox/mailbox-sti.c1
-rw-r--r--drivers/mailbox/mailbox-test.c92
-rw-r--r--drivers/mailbox/mailbox.c3
-rw-r--r--drivers/mailbox/pcc.c5
-rw-r--r--drivers/mailbox/tegra-hsp.c479
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/btree.c39
-rw-r--r--drivers/md/bcache/btree.h3
-rw-r--r--drivers/md/bcache/request.c4
-rw-r--r--drivers/md/bcache/super.c7
-rw-r--r--drivers/md/dm-cache-block-types.h6
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/raid0.c12
-rw-r--r--drivers/md/raid1.c275
-rw-r--r--drivers/md/raid10.c245
-rw-r--r--drivers/md/raid5-cache.c36
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/Kconfig18
-rw-r--r--drivers/media/Makefile4
-rw-r--r--drivers/media/cec/Makefile (renamed from drivers/staging/media/cec/Makefile)2
-rw-r--r--drivers/media/cec/cec-adap.c (renamed from drivers/staging/media/cec/cec-adap.c)244
-rw-r--r--drivers/media/cec/cec-api.c (renamed from drivers/staging/media/cec/cec-api.c)13
-rw-r--r--drivers/media/cec/cec-core.c (renamed from drivers/staging/media/cec/cec-core.c)18
-rw-r--r--drivers/media/cec/cec-priv.h (renamed from drivers/staging/media/cec/cec-priv.h)0
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h1
-rw-r--r--drivers/media/common/b2c2/flexcop-eeprom.c3
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c14
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c9
-rw-r--r--drivers/media/common/b2c2/flexcop.c3
-rw-r--r--drivers/media/common/cx2341x.c12
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c4
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/tveeprom.c77
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c412
-rw-r--r--drivers/media/dvb-core/Kconfig17
-rw-r--r--drivers/media/dvb-core/Makefile2
-rw-r--r--drivers/media/dvb-core/demux.h5
-rw-r--r--drivers/media/dvb-core/dmxdev.c32
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c60
-rw-r--r--drivers/media/dvb-core/dvb_demux.c117
-rw-r--r--drivers/media/dvb-core/dvb_demux.h2
-rw-r--r--drivers/media/dvb-core/dvb_filter.c603
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c107
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h10
-rw-r--r--drivers/media/dvb-core/dvb_net.c954
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c2
-rw-r--r--drivers/media/dvb-core/dvbdev.c44
-rw-r--r--drivers/media/dvb-core/dvbdev.h25
-rw-r--r--drivers/media/dvb-frontends/Kconfig9
-rw-r--r--drivers/media/dvb-frontends/af9013.c4
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c2
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c3
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_common.c4
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c4
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c4
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx24110.c8
-rw-r--r--drivers/media/dvb-frontends/cx24113.c7
-rw-r--r--drivers/media/dvb-frontends/cx24116.c12
-rw-r--r--drivers/media/dvb-frontends/cx24117.c8
-rw-r--r--drivers/media/dvb-frontends/cx24120.c7
-rw-r--r--drivers/media/dvb-frontends/cx24123.c8
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c6
-rw-r--r--drivers/media/dvb-frontends/dib0070.c53
-rw-r--r--drivers/media/dvb-frontends/dib0090.c165
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c141
-rw-r--r--drivers/media/dvb-frontends/dib3000mb_priv.h16
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c12
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c77
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c131
-rw-r--r--drivers/media/dvb-frontends/dib8000.c261
-rw-r--r--drivers/media/dvb-frontends/dib9000.c175
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c46
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.h2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c4
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c19
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c12
-rw-r--r--drivers/media/dvb-frontends/ec100.c4
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c4
-rw-r--r--drivers/media/dvb-frontends/hd29l2.c4
-rw-r--r--drivers/media/dvb-frontends/helene.c3
-rw-r--r--drivers/media/dvb-frontends/horus3a.c3
-rw-r--r--drivers/media/dvb-frontends/itd1000.c3
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c3
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/lg2160.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c8
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c11
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.c4
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c13
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c5
-rw-r--r--drivers/media/dvb-frontends/mn88472.c26
-rw-r--r--drivers/media/dvb-frontends/mn88473.c225
-rw-r--r--drivers/media/dvb-frontends/mn88473_priv.h2
-rw-r--r--drivers/media/dvb-frontends/mt312.c9
-rw-r--r--drivers/media/dvb-frontends/mt352.c4
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c15
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c140
-rw-r--r--drivers/media/dvb-frontends/or51132.c10
-rw-r--r--drivers/media/dvb-frontends/or51211.c7
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c8
-rw-r--r--drivers/media/dvb-frontends/s921.c8
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c10
-rw-r--r--drivers/media/dvb-frontends/sp8870.c4
-rw-r--r--drivers/media/dvb-frontends/sp887x.c7
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c34
-rw-r--r--drivers/media/dvb-frontends/stb6000.c3
-rw-r--r--drivers/media/dvb-frontends/stb6100.c6
-rw-r--r--drivers/media/dvb-frontends/stv0288.c13
-rw-r--r--drivers/media/dvb-frontends/stv0297.c8
-rw-r--r--drivers/media/dvb-frontends/stv0299.c11
-rw-r--r--drivers/media/dvb-frontends/stv0367.c4
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c3
-rw-r--r--drivers/media/dvb-frontends/stv090x.c28
-rw-r--r--drivers/media/dvb-frontends/stv6110.c3
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c4
-rw-r--r--drivers/media/dvb-frontends/tc90522.c7
-rw-r--r--drivers/media/dvb-frontends/tda10021.c7
-rw-r--r--drivers/media/dvb-frontends/tda10023.c10
-rw-r--r--drivers/media/dvb-frontends/tda10048.c16
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10071.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c3
-rw-r--r--drivers/media/dvb-frontends/tda665x.c3
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/dvb-frontends/tda8261.c3
-rw-r--r--drivers/media/dvb-frontends/tda826x.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/dvb-frontends/tua6100.c3
-rw-r--r--drivers/media/dvb-frontends/ves1820.c12
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c4
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10039.c6
-rw-r--r--drivers/media/dvb-frontends/zl10353.c4
-rw-r--r--drivers/media/firewire/firedtv-avc.c4
-rw-r--r--drivers/media/firewire/firedtv-rc.c5
-rw-r--r--drivers/media/i2c/Kconfig6
-rw-r--r--drivers/media/i2c/ad5820.c5
-rw-r--r--drivers/media/i2c/adv7170.c2
-rw-r--r--drivers/media/i2c/adv7175.c2
-rw-r--r--drivers/media/i2c/adv7511.c5
-rw-r--r--drivers/media/i2c/adv7604.c30
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/bt856.c2
-rw-r--r--drivers/media/i2c/bt866.c2
-rw-r--r--drivers/media/i2c/cs53l32a.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c11
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c7
-rw-r--r--drivers/media/i2c/m52790.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.c90
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c115
-rw-r--r--drivers/media/i2c/saa6588.c2
-rw-r--r--drivers/media/i2c/saa7110.c2
-rw-r--r--drivers/media/i2c/saa7185.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c916
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c4
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h28
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c3
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c3
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c3
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/tvaudio.c5
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/tvp5150.c298
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/media-device.c32
-rw-r--r--drivers/media/media-entity.c6
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c6
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c7
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c46
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c9
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/bt8xx/dst.c278
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c25
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c23
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c8
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c17
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c3
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c9
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c35
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c6
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c6
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c6
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c4
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c39
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c8
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c7
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c15
-rw-r--r--drivers/media/pci/cx23885/altera-ci.h14
-rw-r--r--drivers/media/pci/cx23885/cimax2.c15
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c65
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c53
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c146
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c41
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c27
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c26
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c13
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c4
-rw-r--r--drivers/media/pci/cx23885/netup-init.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c304
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c292
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c485
-rw-r--r--drivers/media/pci/cx88/cx88-core.c420
-rw-r--r--drivers/media/pci/cx88/cx88-dsp.c136
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c331
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c136
-rw-r--r--drivers/media/pci/cx88/cx88-input.c66
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c315
-rw-r--r--drivers/media/pci/cx88/cx88-reg.h123
-rw-r--r--drivers/media/pci/cx88/cx88-tvaudio.c169
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c47
-rw-r--r--drivers/media/pci/cx88/cx88-video.c403
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.c60
-rw-r--r--drivers/media/pci/cx88/cx88-vp3054-i2c.h38
-rw-r--r--drivers/media/pci/cx88/cx88.h203
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c6
-rw-r--r--drivers/media/pci/dm1105/dm1105.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c12
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c37
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c3
-rw-r--r--drivers/media/pci/meye/meye.c19
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c13
-rw-r--r--drivers/media/pci/pluto2/pluto2.c4
-rw-r--r--drivers/media/pci/pt1/pt1.c7
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007s.c2
-rw-r--r--drivers/media/pci/pt1/va1j5jf8007t.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c8
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c39
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c32
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c31
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c13
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c3
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c12
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c66
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c34
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c18
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c10
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c14
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h3
-rw-r--r--drivers/media/pci/ttpci/Makefile2
-rw-r--r--drivers/media/pci/ttpci/av7110.c49
-rw-r--r--drivers/media/pci/ttpci/av7110.h7
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c12
-rw-r--r--drivers/media/pci/ttpci/budget-av.c3
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c4
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c3
-rw-r--r--drivers/media/pci/ttpci/budget.c3
-rw-r--r--drivers/media/pci/ttpci/budget.h8
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.c114
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.h (renamed from drivers/media/dvb-core/dvb_filter.h)0
-rw-r--r--drivers/media/pci/ttpci/ttpci-eeprom.c3
-rw-r--r--drivers/media/pci/tw5864/tw5864-reg.h8
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c13
-rw-r--r--drivers/media/pci/tw68/tw68-video.c16
-rw-r--r--drivers/media/pci/zoran/videocodec.c2
-rw-r--r--drivers/media/pci/zoran/zoran_device.c35
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c4
-rw-r--r--drivers/media/platform/Kconfig49
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/arv.c2
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c9
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c6
-rw-r--r--drivers/media/platform/blackfin/ppi.c2
-rw-r--r--drivers/media/platform/coda/coda-common.c7
-rw-r--r--drivers/media/platform/coda/coda-h264.c1
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c4
-rw-r--r--drivers/media/platform/davinci/vpbe.c82
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c91
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c37
-rw-r--r--drivers/media/platform/davinci/vpif_display.c39
-rw-r--r--drivers/media/platform/davinci/vpss.c7
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c279
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h11
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c38
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c14
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c26
-rw-r--r--drivers/media/platform/mtk-mdp/Makefile9
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c159
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.h72
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c290
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.h260
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h126
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c1286
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h22
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.c156
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_regs.h31
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c145
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h41
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile15
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c1451
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h88
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c394
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c202
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h28
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h62
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c33
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h5
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c507
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c634
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c967
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_base.h56
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c122
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h101
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h103
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c170
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h96
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c21
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.h48
-rw-r--r--drivers/media/platform/mx2_emmaprp.c10
-rw-r--r--drivers/media/platform/omap/omap_vout.c24
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c5
-rw-r--r--drivers/media/platform/omap3isp/isp.c23
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c9
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c13
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c4
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c8
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c8
-rw-r--r--drivers/media/platform/omap3isp/isphist.c28
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c58
-rw-r--r--drivers/media/platform/pxa_camera.c18
-rw-r--r--drivers/media/platform/rcar-fcp.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2445
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c17
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h3
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v8.h2
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c73
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c15
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c132
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c24
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c8
-rw-r--r--drivers/media/platform/ti-vpe/Makefile10
-rw-r--r--drivers/media/platform/ti-vpe/cal.c14
-rw-r--r--drivers/media/platform/ti-vpe/csc.c18
-rw-r--r--drivers/media/platform/ti-vpe/csc.h2
-rw-r--r--drivers/media/platform/ti-vpe/sc.c28
-rw-r--r--drivers/media/platform/ti-vpe/sc.h11
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c355
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h85
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h130
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c471
-rw-r--r--drivers/media/platform/via-camera.c7
-rw-r--r--drivers/media/platform/vivid/Kconfig2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c3
-rw-r--r--drivers/media/platform/vivid/vivid-cec.h1
-rw-r--r--drivers/media/platform/vivid/vivid-core.c13
-rw-r--r--drivers/media/platform/vivid/vivid-core.h3
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c25
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c17
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c70
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c5
-rw-r--r--drivers/media/radio/radio-gemtek.c8
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c7
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c15
-rw-r--r--drivers/media/radio/si4713/si4713.c13
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c46
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c8
-rw-r--r--drivers/media/rc/Kconfig17
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c3
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/fintek-cir.c6
-rw-r--r--drivers/media/rc/imon.c61
-rw-r--r--drivers/media/rc/ir-hix5hd2.c25
-rw-r--r--drivers/media/rc/ir-rx51.c2
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c3
-rw-r--r--drivers/media/rc/ite-cir.c11
-rw-r--r--drivers/media/rc/lirc_dev.c29
-rw-r--r--drivers/media/rc/mceusb.c102
-rw-r--r--drivers/media/rc/meson-ir.c1
-rw-r--r--drivers/media/rc/nuvoton-cir.c143
-rw-r--r--drivers/media/rc/nuvoton-cir.h4
-rw-r--r--drivers/media/rc/rc-ir-raw.c17
-rw-r--r--drivers/media/rc/rc-main.c70
-rw-r--r--drivers/media/rc/redrat3.c327
-rw-r--r--drivers/media/rc/serial_ir.c844
-rw-r--r--drivers/media/rc/streamzap.c11
-rw-r--r--drivers/media/rc/winbond-cir.c13
-rw-r--r--drivers/media/spi/gs1662.c4
-rw-r--r--drivers/media/tuners/fc0011.c11
-rw-r--r--drivers/media/tuners/fc0012.c3
-rw-r--r--drivers/media/tuners/fc0013.c3
-rw-r--r--drivers/media/tuners/max2165.c4
-rw-r--r--drivers/media/tuners/mc44s803.c8
-rw-r--r--drivers/media/tuners/mt2060.c3
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/mt20xx.c25
-rw-r--r--drivers/media/tuners/mt2131.c3
-rw-r--r--drivers/media/tuners/mt2266.c3
-rw-r--r--drivers/media/tuners/mxl5005s.c3
-rw-r--r--drivers/media/tuners/mxl5007t.c4
-rw-r--r--drivers/media/tuners/qt1010.c3
-rw-r--r--drivers/media/tuners/r820t.c4
-rw-r--r--drivers/media/tuners/tda18218.c3
-rw-r--r--drivers/media/tuners/tda18271-common.c4
-rw-r--r--drivers/media/tuners/tda18271-fe.c7
-rw-r--r--drivers/media/tuners/tda18271-maps.c6
-rw-r--r--drivers/media/tuners/tda827x.c3
-rw-r--r--drivers/media/tuners/tda8290.c8
-rw-r--r--drivers/media/tuners/tda9887.c2
-rw-r--r--drivers/media/tuners/tea5761.c10
-rw-r--r--drivers/media/tuners/tea5767.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c49
-rw-r--r--drivers/media/tuners/tuner-xc2028.c120
-rw-r--r--drivers/media/tuners/xc4000.c29
-rw-r--r--drivers/media/tuners/xc5000.c4
-rw-r--r--drivers/media/usb/Kconfig5
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-video.c3
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c11
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c10
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c1
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c26
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c3
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c1
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c6
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c12
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/friio.c4
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c3
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c10
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c3
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c3
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c4
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c95
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c69
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c204
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c206
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c112
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c291
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c65
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c9
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c163
-rw-r--r--drivers/media/usb/em28xx/em28xx.h19
-rw-r--r--drivers/media/usb/go7007/Kconfig2
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c5
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_core.c11
-rw-r--r--drivers/media/usb/gspca/mr97310a.c3
-rw-r--r--drivers/media/usb/gspca/ov519.c3
-rw-r--r--drivers/media/usb/gspca/pac207.c4
-rw-r--r--drivers/media/usb/gspca/pac7302.c3
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c6
-rw-r--r--drivers/media/usb/gspca/spca506.c3
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c9
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c27
-rw-r--r--drivers/media/usb/gspca/sunplus.c3
-rw-r--r--drivers/media/usb/gspca/topro.c3
-rw-r--r--drivers/media/usb/gspca/zc3xx.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c9
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c7
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c26
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig (renamed from drivers/staging/media/pulse8-cec/Kconfig)2
-rw-r--r--drivers/media/usb/pulse8-cec/Makefile (renamed from drivers/staging/media/pulse8-cec/Makefile)0
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c (renamed from drivers/staging/media/pulse8-cec/pulse8-cec.c)12
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-audio.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-debugifc.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c29
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c181
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c47
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c35
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ioread.c38
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-wm8775.c3
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c4
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c6
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c10
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c14
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c14
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c16
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-stds.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c18
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c92
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c8
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c105
-rw-r--r--drivers/media/usb/usbtv/usbtv.h3
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c20
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c177
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c19
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/tuner-core.c121
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c59
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c103
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c3
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c25
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c3
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/memory/atmel-sdramc.c6
-rw-r--r--drivers/memory/da8xx-ddrctl.c173
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/fusion/mptlan.h2
-rw-r--r--drivers/mfd/88pm860x-core.c5
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab3100-core.c39
-rw-r--r--drivers/mfd/ab8500-core.c37
-rw-r--r--drivers/mfd/ab8500-debugfs.c21
-rw-r--r--drivers/mfd/ab8500-gpadc.c19
-rw-r--r--drivers/mfd/ab8500-sysctrl.c9
-rw-r--r--drivers/mfd/abx500-core.c7
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/arizona-irq.c8
-rw-r--r--drivers/mfd/axp20x-i2c.c7
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bcm590xx.c2
-rw-r--r--drivers/mfd/cs47l24-tables.c6
-rw-r--r--drivers/mfd/davinci_voicecodec.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c1
-rw-r--r--drivers/mfd/hi655x-pmic.c1
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c40
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/palmas.c3
-rw-r--r--drivers/mfd/qcom-pm8xxx.c231
-rw-r--r--drivers/mfd/rk808.c23
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c181
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/tps65217.c71
-rw-r--r--drivers/mfd/tps65218.c34
-rw-r--r--drivers/mfd/tps65912-core.c17
-rw-r--r--drivers/mfd/wm5102-tables.c1412
-rw-r--r--drivers/mfd/wm8994-core.c6
-rw-r--r--drivers/misc/cxl/api.c147
-rw-r--r--drivers/misc/cxl/context.c22
-rw-r--r--drivers/misc/cxl/cxl.h6
-rw-r--r--drivers/misc/cxl/debugfs.c6
-rw-r--r--drivers/misc/cxl/file.c5
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/cxl/irq.c2
-rw-r--r--drivers/misc/cxl/native.c20
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/phb.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c3
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/client.c20
-rw-r--r--drivers/misc/mei/debugfs.c2
-rw-r--r--drivers/misc/mei/hbm.c4
-rw-r--r--drivers/misc/mei/hw.h6
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/sram.c42
-rw-r--r--drivers/mmc/core/block.c2
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/mmc_ops.c25
-rw-r--r--drivers/mmc/core/sd.c12
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c8
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-acpi.c3
-rw-r--r--drivers/mmc/host/sdhci-cadence.c1
-rw-r--r--drivers/mmc/host/sdhci.c33
-rw-r--r--drivers/mtd/bcm47xxpart.c10
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c24
-rw-r--r--drivers/mtd/devices/pmc551.c2
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/inftlmount.c2
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/mtdcore.c44
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c5
-rw-r--r--drivers/mtd/nand/atmel_nand.c10
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c10
-rw-r--r--drivers/mtd/nand/cafe_nand.c5
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/cs553x_nand.c5
-rw-r--r--drivers/mtd/nand/denali.c101
-rw-r--r--drivers/mtd/nand/denali.h12
-rw-r--r--drivers/mtd/nand/denali_dt.c3
-rw-r--r--drivers/mtd/nand/denali_pci.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpio.c5
-rw-r--r--drivers/mtd/nand/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c12
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/nand_base.c84
-rw-r--r--drivers/mtd/nand/nand_ids.c3
-rw-r--r--drivers/mtd/nand/nand_timings.c26
-rw-r--r--drivers/mtd/nand/nandsim.c19
-rw-r--r--drivers/mtd/nand/omap2.c10
-rw-r--r--drivers/mtd/nand/orion_nand.c5
-rw-r--r--drivers/mtd/nand/oxnas_nand.c195
-rw-r--r--drivers/mtd/nand/pasemi_nand.c5
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c22
-rw-r--r--drivers/mtd/nand/s3c2410.c286
-rw-r--r--drivers/mtd/nand/socrates_nand.c12
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/tango_nand.c678
-rw-r--r--drivers/mtd/nand/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/vf610_nfc.c10
-rw-r--r--drivers/mtd/nand/xway_nand.c5
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c6
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c14
-rw-r--r--drivers/net/appletalk/ipddp.c4
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/softing/softing_fw.c4
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c17
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/Makefile1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c8
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/macb.c31
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c146
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c66
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h13
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c12
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c14
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c14
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c34
-rw-r--r--drivers/net/ethernet/fealnx.c2
-rw-r--r--drivers/net/ethernet/freescale/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c77
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c15
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c13
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c77
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c17
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c19
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_roce.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c15
-rw-r--r--drivers/net/ethernet/rdc/r6040.c10
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c21
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/sfc/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/ef10.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c37
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c60
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h14
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.h2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c24
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c12
-rw-r--r--drivers/net/fddi/skfp/pmf.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c2
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/gtp.c8
-rw-r--r--drivers/net/hamradio/6pack.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c13
-rw-r--r--drivers/net/ieee802154/atusb.c59
-rw-r--r--drivers/net/ipvlan/ipvlan.h5
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c60
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c7
-rw-r--r--drivers/net/irda/irtty-sir.c2
-rw-r--r--drivers/net/irda/kingsun-sir.c2
-rw-r--r--drivers/net/irda/ks959-sir.c2
-rw-r--r--drivers/net/irda/ksdazzle-sir.c2
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/irda/w83977af_ir.c6
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83867.c18
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c3
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/phy/phy_device.c22
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/ppp/pppox.c2
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/slip/slhc.c2
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/tun.c9
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/r8152.c87
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/virtio_net.c407
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h3
-rw-r--r--drivers/net/vrf.c13
-rw-r--r--drivers/net/vxlan.c13
-rw-r--r--drivers/net/wan/dlci.c2
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/hd64570.c2
-rw-r--r--drivers/net/wan/hd64572.c2
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/lmc/lmc_media.c99
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/slic_ds26522.c2
-rw-r--r--drivers/net/wireless/ath/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_geo.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/Makefile3
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c44
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h3
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h4
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Makefile2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/Makefile2
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile2
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c25
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h5
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c662
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h48
-rw-r--r--drivers/ntb/ntb_transport.c29
-rw-r--r--drivers/nubus/proc.c2
-rw-r--r--drivers/nvdimm/claim.c46
-rw-r--r--drivers/nvdimm/core.c29
-rw-r--r--drivers/nvdimm/dimm.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/e820.c12
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c8
-rw-r--r--drivers/nvdimm/nd.h12
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c21
-rw-r--r--drivers/nvdimm/region_devs.c2
-rw-r--r--drivers/nvme/host/core.c24
-rw-r--r--drivers/nvme/host/fc.c24
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c58
-rw-r--r--drivers/nvme/host/rdma.c57
-rw-r--r--drivers/nvme/host/scsi.c27
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/fcloop.c4
-rw-r--r--drivers/nvme/target/rdma.c3
-rw-r--r--drivers/nvmem/core.c4
-rw-r--r--drivers/nvmem/imx-ocotp.c2
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/of/base.c9
-rw-r--r--drivers/of/irq.c1
-rw-r--r--drivers/of/of_numa.c7
-rw-r--r--drivers/of/of_pci.c21
-rw-r--r--drivers/of/platform.c6
-rw-r--r--drivers/of/resolver.c358
-rw-r--r--drivers/oprofile/buffer_sync.c2
-rw-r--r--drivers/oprofile/event_buffer.c2
-rw-r--r--drivers/oprofile/oprofilefs.c2
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/ccio-rm-dma.c2
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/eisa_enumerator.c2
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/parisc/pdc_stable.c2
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/ieee1284_ops.c2
-rw-r--r--drivers/parport/parport_gsc.c2
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/procfs.c2
-rw-r--r--drivers/pci/access.c16
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/ecam.c12
-rw-r--r--drivers/pci/host/Kconfig16
-rw-r--r--drivers/pci/host/Makefile19
-rw-r--r--drivers/pci/host/pci-hyperv.c100
-rw-r--r--drivers/pci/host/pci-layerscape.c20
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c160
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c9
-rw-r--r--drivers/pci/host/pci-thunder-pem.c94
-rw-r--r--drivers/pci/host/pci-xgene.c126
-rw-r--r--drivers/pci/host/pcie-altera.c10
-rw-r--r--drivers/pci/host/pcie-hisi.c107
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c1
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c1
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c28
-rw-r--r--drivers/pci/host/pcie-iproc.c949
-rw-r--r--drivers/pci/host/pcie-iproc.h45
-rw-r--r--drivers/pci/host/pcie-qcom.c177
-rw-r--r--drivers/pci/host/pcie-rcar.c5
-rw-r--r--drivers/pci/host/pcie-rockchip.c284
-rw-r--r--drivers/pci/host/pcie-spear13xx.c6
-rw-r--r--drivers/pci/host/vmd.c30
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c31
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c5
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c12
-rw-r--r--drivers/pci/hotplug/pciehp_core.c9
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c21
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c10
-rw-r--r--drivers/pci/iov.c70
-rw-r--r--drivers/pci/msi.c3
-rw-r--r--drivers/pci/pci-acpi.c100
-rw-r--r--drivers/pci/pci-mid.c2
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c138
-rw-r--r--drivers/pci/pci.h19
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c18
-rw-r--r--drivers/pci/pcie/aspm.c22
-rw-r--r--drivers/pci/pcie/pme.c29
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c13
-rw-r--r--drivers/pci/probe.c248
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c184
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/rom.c5
-rw-r--r--drivers/pci/setup-res.c48
-rw-r--r--drivers/pci/syscall.c2
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c19
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c91
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h31
-rw-r--r--drivers/platform/x86/Kconfig49
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/acer-wmi.c56
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c23
-rw-r--r--drivers/platform/x86/asus-wmi.c29
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/dell-laptop.c26
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c42
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c6
-rw-r--r--drivers/platform/x86/intel-smartconnect.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c35
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c162
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c386
-rw-r--r--drivers/platform/x86/intel_pmc_core.h110
-rw-r--r--drivers/platform/x86/mlx-platform.c361
-rw-r--r--drivers/platform/x86/mlxcpld-hotplug.c515
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/surface3-wmi.c297
-rw-r--r--drivers/platform/x86/surface3_button.c250
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c97
-rw-r--r--drivers/pnp/interface.c2
-rw-r--r--drivers/pnp/pnpbios/proc.c2
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-poweroff.c1
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c2
-rw-r--r--drivers/power/reset/piix4-poweroff.c113
-rw-r--r--drivers/power/reset/syscon-reboot-mode.c1
-rw-r--r--drivers/power/reset/zx-reboot.c1
-rw-r--r--drivers/power/supply/ab8500_fg.c8
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c1
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c44
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c3
-rw-r--r--drivers/power/supply/max17040_battery.c52
-rw-r--r--drivers/power/supply/max8997_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c25
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-hibvt.c271
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/regulator/rk808-regulator.c9
-rw-r--r--drivers/regulator/tps65218-regulator.c153
-rw-r--r--drivers/remoteproc/remoteproc_core.c29
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c43
-rw-r--r--drivers/reset/reset-berlin.c12
-rw-r--r--drivers/reset/reset-lpc18xx.c32
-rw-r--r--drivers/reset/reset-oxnas.c1
-rw-r--r--drivers/reset/reset-socfpga.c10
-rw-r--r--drivers/reset/reset-sunxi.c9
-rw-r--r--drivers/reset/reset-zynq.c10
-rw-r--r--drivers/reset/sti/Kconfig8
-rw-r--r--drivers/reset/sti/Makefile2
-rw-r--r--drivers/reset/sti/reset-stih415.c112
-rw-r--r--drivers/reset/sti/reset-stih416.c143
-rw-r--r--drivers/reset/tegra/Kconfig2
-rw-r--r--drivers/reset/tegra/Makefile1
-rw-r--r--drivers/reset/tegra/reset-bpmp.c71
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c16
-rw-r--r--drivers/rtc/rtc-cmos.c75
-rw-r--r--drivers/rtc/rtc-ds1307.c52
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c154
-rw-r--r--drivers/rtc/rtc-lib.c4
-rw-r--r--drivers/rtc/rtc-mcp795.c122
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-r7301.c453
-rw-r--r--drivers/rtc/rtc-starfire.c10
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-twl.c202
-rw-r--r--drivers/s390/block/dasd_3990_erp.c6
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c2
-rw-r--r--drivers/s390/block/dasd_proc.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/sclp_tty.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/char/tape_char.c2
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/blacklist.c2
-rw-r--r--drivers/s390/crypto/Makefile13
-rw-r--r--drivers/s390/crypto/ap_asm.h191
-rw-r--r--drivers/s390/crypto/ap_bus.c1335
-rw-r--r--drivers/s390/crypto/ap_bus.h98
-rw-r--r--drivers/s390/crypto/ap_card.c170
-rw-r--r--drivers/s390/crypto/ap_debug.h28
-rw-r--r--drivers/s390/crypto/ap_queue.c701
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1139
-rw-r--r--drivers/s390/crypto/zcrypt_api.h99
-rw-r--r--drivers/s390/crypto/zcrypt_card.c187
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c216
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c319
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h50
-rw-r--r--drivers/s390/crypto/zcrypt_error.h105
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c135
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h5
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c660
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h23
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c378
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c226
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c17
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h41
-rw-r--r--drivers/s390/scsi/zfcp_erp.c61
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h3
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h30
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c61
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c2
-rw-r--r--drivers/sbus/char/jsflash.c2
-rw-r--r--drivers/sbus/char/openprom.c2
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-sas.c9
-rw-r--r--drivers/scsi/3w-sas.h7
-rw-r--r--drivers/scsi/3w-xxxx.c9
-rw-r--r--drivers/scsi/3w-xxxx.h5
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c77
-rw-r--r--drivers/scsi/NCR5380.h11
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/commctrl.c2
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c79
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c78
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c320
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c40
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/dpt_i2o.c2
-rw-r--r--drivers/scsi/fnic/fnic.h1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c16
-rw-r--r--drivers/scsi/g_NCR5380.c153
-rw-r--r--drivers/scsi/g_NCR5380.h2
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hpsa.c37
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h1
-rw-r--r--drivers/scsi/ips.h2
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c43
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c34
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/qedi/Kconfig10
-rw-r--r--drivers/scsi/qedi/Makefile5
-rw-r--r--drivers/scsi/qedi/qedi.h364
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c143
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h144
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c244
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2378
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h73
-rw-r--r--drivers/scsi/qedi/qedi_hsi.h52
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c1624
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h232
-rw-r--r--drivers/scsi/qedi/qedi_main.c2095
-rw-r--r--drivers/scsi/qedi/qedi_sysfs.c52
-rw-r--r--drivers/scsi/qedi/qedi_version.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h108
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h30
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c407
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c224
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c116
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c475
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_proc.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/sd.c19
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/snic/snic_main.c3
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c44
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h30
-rw-r--r--drivers/scsi/ufs/ufshcd.c59
-rw-r--r--drivers/scsi/ufs/ufshcd.h12
-rw-r--r--drivers/scsi/ufs/ufshci.h7
-rw-r--r--drivers/soc/fsl/qbman/bman.c8
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c17
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h1
-rw-r--r--drivers/soc/fsl/qbman/qman.c233
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c41
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h17
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c27
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c38
-rw-r--r--drivers/soc/fsl/qe/qe.c6
-rw-r--r--drivers/soc/mediatek/Kconfig2
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c465
-rw-r--r--drivers/soc/renesas/Makefile4
-rw-r--r--drivers/soc/renesas/r8a7743-sysc.c32
-rw-r--r--drivers/soc/renesas/r8a7745-sysc.c32
-rw-r--r--drivers/soc/renesas/rcar-sysc.c6
-rw-r--r--drivers/soc/renesas/rcar-sysc.h2
-rw-r--r--drivers/soc/renesas/renesas-soc.c257
-rw-r--r--drivers/soc/rockchip/pm_domains.c81
-rw-r--r--drivers/soc/tegra/Kconfig14
-rw-r--r--drivers/soc/tegra/pmc.c398
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c21
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/greybus/camera.c4
-rw-r--r--drivers/staging/greybus/es2.c6
-rw-r--r--drivers/staging/greybus/svc.c6
-rw-r--r--drivers/staging/greybus/timesync.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c85
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h17
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c13
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c6
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c66
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h5
-rw-r--r--drivers/staging/media/cec/Kconfig12
-rw-r--r--drivers/staging/media/cec/TODO32
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c31
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c4
-rw-r--r--drivers/staging/media/lirc/Kconfig13
-rw-r--r--drivers/staging/media/lirc/Makefile1
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c11
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c5
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c1130
-rw-r--r--drivers/staging/media/pulse8-cec/TODO52
-rw-r--r--drivers/staging/media/s5p-cec/Kconfig2
-rw-r--r--drivers/staging/media/s5p-cec/TODO12
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c10
-rw-r--r--drivers/staging/media/st-cec/Kconfig2
-rw-r--r--drivers/staging/media/st-cec/TODO7
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c7
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/rtl8188eu/Makefile2
-rw-r--r--drivers/staging/rtl8192e/Makefile2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h10
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h8
-rw-r--r--drivers/target/loopback/tcm_loop.h4
-rw-r--r--drivers/target/sbp/sbp_target.c3
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_alua.h2
-rw-r--r--drivers/target/target_core_configfs.c6
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/target/target_core_file.c1
-rw-r--r--drivers/target/target_core_file.h2
-rw-r--r--drivers/target/target_core_iblock.h3
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c5
-rw-r--r--drivers/target/target_core_pr.h4
-rw-r--r--drivers/target/target_core_pscsi.h7
-rw-r--r--drivers/target/target_core_rd.c2
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_sbc.c1
-rw-r--r--drivers/target/target_core_transport.c24
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/target_core_user.c5
-rw-r--r--drivers/target/target_core_xcopy.c158
-rw-r--r--drivers/target/target_core_xcopy.h9
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h3
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/hvc/hvc_console.c2
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/moxa.c2
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_hdlc.c2
-rw-r--r--drivers/tty/n_r3964.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c12
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c22
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/synclink.c2
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/sysrq.c4
-rw-r--r--drivers/tty/tty_ioctl.c2
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vc_screen.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c2
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/chipidea/otg_fsm.c14
-rw-r--r--drivers/usb/core/config.c10
-rw-r--r--drivers/usb/core/hub.c61
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc2/params.c30
-rw-r--r--drivers/usb/dwc3/core.h10
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c6
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c17
-rw-r--r--drivers/usb/dwc3/ep0.c46
-rw-r--r--drivers/usb/dwc3/gadget.c26
-rw-r--r--drivers/usb/gadget/composite.c12
-rw-r--r--drivers/usb/gadget/function/f_fs.c16
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/f_tcm.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c22
-rw-r--r--drivers/usb/gadget/udc/core.c6
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c6
-rw-r--r--drivers/usb/host/ehci-timer.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c5
-rw-r--r--drivers/usb/host/ohci-at91.c24
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-mem.c46
-rw-r--r--drivers/usb/host/xhci-mtk.c4
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-ring.c262
-rw-r--r--drivers/usb/host/xhci.c17
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/ldusb.c2
-rw-r--r--drivers/usb/misc/legousbtower.c2
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/mon/mon_stat.c2
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h7
-rw-r--r--drivers/usb/musb/musb_cppi41.c9
-rw-r--r--drivers/usb/musb/musb_debugfs.c22
-rw-r--r--drivers/usb/musb/musb_dsps.c12
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musbhsdma.h2
-rw-r--r--drivers/usb/serial/ch341.c108
-rw-r--r--drivers/usb/serial/cyberjack.c10
-rw-r--r--drivers/usb/serial/f81534.c8
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c5
-rw-r--r--drivers/usb/serial/io_ti.c22
-rw-r--r--drivers/usb/serial/iuu_phoenix.c11
-rw-r--r--drivers/usb/serial/keyspan_pda.c14
-rw-r--r--drivers/usb/serial/kl5kusb105.c9
-rw-r--r--drivers/usb/serial/kobil_sct.c12
-rw-r--r--drivers/usb/serial/mos7720.c56
-rw-r--r--drivers/usb/serial/mos7840.c24
-rw-r--r--drivers/usb/serial/omninet.c13
-rw-r--r--drivers/usb/serial/oti6858.c16
-rw-r--r--drivers/usb/serial/pl2303.c8
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c14
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/wusbcore/crypto.c3
-rw-r--r--drivers/vfio/mdev/mdev_core.c100
-rw-r--r--drivers/vfio/mdev/mdev_private.h29
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c8
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c12
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c5
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c328
-rw-r--r--drivers/vfio/vfio_iommu_type1.c104
-rw-r--r--drivers/vhost/scsi.c5
-rw-r--r--drivers/vhost/vhost.c43
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/vhost/vringh.c5
-rw-r--r--drivers/vhost/vsock.c25
-rw-r--r--drivers/video/console/newport_con.c2
-rw-r--r--drivers/video/fbdev/68328fb.c2
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c5
-rw-r--r--drivers/video/fbdev/hitfb.c2
-rw-r--r--drivers/video/fbdev/hpfb.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c2
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/stifb.c2
-rw-r--r--drivers/video/fbdev/w100fb.c2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c201
-rw-r--r--drivers/virtio/virtio_pci_common.h1
-rw-r--r--drivers/virtio/virtio_pci_modern.c8
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c2
-rw-r--r--drivers/watchdog/Kconfig49
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c20
-rw-r--r--drivers/watchdog/bcm7038_wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c23
-rw-r--r--drivers/watchdog/da9062_wdt.c12
-rw-r--r--drivers/watchdog/davinci_wdt.c6
-rw-r--r--drivers/watchdog/intel-mid_wdt.c22
-rw-r--r--drivers/watchdog/it87_wdt.c4
-rw-r--r--drivers/watchdog/jz4740_wdt.c2
-rw-r--r--drivers/watchdog/loongson1_wdt.c170
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/mei_wdt.c2
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c1
-rw-r--r--drivers/watchdog/octeon-wdt-main.c1
-rw-r--r--drivers/watchdog/qcom-wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c24
-rw-r--r--drivers/xen/arm-device.c8
-rw-r--r--drivers/xen/events/events_fifo.c5
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/platform-pci.c71
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c49
-rw-r--r--drivers/zorro/proc.c2
2410 files changed, 89718 insertions, 30260 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c5f9cbe0ae21..83e5f7e1a20d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
config ACPI_REV_OVERRIDE_POSSIBLE
- bool "Allow supported ACPI revision to be overriden"
+ bool "Allow supported ACPI revision to be overridden"
depends on X86
default y
help
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 201292e67ee8..d00bc0ef87a6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -37,7 +37,7 @@
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <acpi/video.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 13caebd679f5..8c4e0a18460a 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -114,7 +114,7 @@ void __init acpi_watchdog_init(void)
pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
resources, nresources);
if (IS_ERR(pdev))
- pr_err("Failed to create platform device\n");
+ pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
kfree(resources);
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7dd527f8ca1d..94be8a8e6c08 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -166,6 +166,12 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table);
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc);
+
/*
* tbxfload
*/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 5fb838e592dc..81473a4880ce 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -311,6 +311,8 @@ void acpi_tb_parse_fadt(void)
{
u32 length;
struct acpi_table_header *table;
+ struct acpi_table_desc *fadt_desc;
+ acpi_status status;
/*
* The FADT has multiple versions with different lengths,
@@ -319,14 +321,12 @@ void acpi_tb_parse_fadt(void)
* Get a local copy of the FADT and convert it to a common format
* Map entire FADT, assumed to be smaller than one page.
*/
- length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
-
- table =
- acpi_os_map_memory(acpi_gbl_root_table_list.
- tables[acpi_gbl_fadt_index].address, length);
- if (!table) {
+ fadt_desc = &acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index];
+ status = acpi_tb_get_table(fadt_desc, &table);
+ if (ACPI_FAILURE(status)) {
return;
}
+ length = fadt_desc->length;
/*
* Validate the FADT checksum before we copy the table. Ignore
@@ -340,7 +340,7 @@ void acpi_tb_parse_fadt(void)
/* All done with the real FADT, unmap it */
- acpi_os_unmap_memory(table, length);
+ acpi_tb_put_table(fadt_desc);
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 51eb07cf9898..86854e846800 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -381,3 +381,88 @@ next_table:
acpi_os_unmap_memory(table, length);
return_ACPI_STATUS(AE_OK);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_get_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * out_table - Where the pointer to the table is returned
+ *
+ * RETURN: Status and pointer to the requested table
+ *
+ * DESCRIPTION: Increase a reference to a table descriptor and return the
+ * validated table pointer.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+ struct acpi_table_header **out_table)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_tb_get_table);
+
+ if (table_desc->validation_count == 0) {
+
+ /* Table needs to be "VALIDATED" */
+
+ status = acpi_tb_validate_table(table_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ table_desc->validation_count++;
+ if (table_desc->validation_count == 0) {
+ ACPI_ERROR((AE_INFO,
+ "Table %p, Validation count is zero after increment\n",
+ table_desc));
+ table_desc->validation_count--;
+ return_ACPI_STATUS(AE_LIMIT);
+ }
+
+ *out_table = table_desc->pointer;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_put_table
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Decrease a reference to a table descriptor and release the
+ * validated table pointer when there are no more references.
+ * If the table descriptor is an entry of the root table list,
+ * this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc)
+{
+
+ ACPI_FUNCTION_TRACE(acpi_tb_put_table);
+
+ if (table_desc->validation_count == 0) {
+ ACPI_WARNING((AE_INFO,
+ "Table %p, Validation count is zero before decrement\n",
+ table_desc));
+ return_VOID;
+ }
+ table_desc->validation_count--;
+
+ if (table_desc->validation_count == 0) {
+
+ /* Table needs to be "INVALIDATED" */
+
+ acpi_tb_invalidate_table(table_desc);
+ }
+
+ return_VOID;
+}
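
Illustrative only, not part of the patch: a minimal sketch of the pairing these two helpers expect, assuming the caller already holds ACPI_MTX_TABLES when the descriptor is a root table list entry (mirroring the tbfadt.c hunk above).

	struct acpi_table_desc *desc = &acpi_gbl_root_table_list.tables[i]; /* i: some root table index */
	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_tb_get_table(desc, &table); /* validates/maps, bumps validation_count */
	if (ACPI_FAILURE(status))
		return status;

	/* ... read table->length, table->revision, etc. ... */

	acpi_tb_put_table(desc); /* drops validation_count, invalidates/unmaps at zero */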
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index d5adb7ac4684..7684707b254b 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -282,7 +282,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_get_table_with_size
+ * FUNCTION: acpi_get_table
*
* PARAMETERS: signature - ACPI signature of needed table
* instance - Which instance (for SSDTs)
@@ -292,16 +292,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
*
* DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
* RSDT/XSDT.
+ * Note that an early stage acpi_get_table() call must be paired
+ * with an early stage acpi_put_table() call. Otherwise, the table
+ * pointer mapped by the early stage mapping implementation may be
+ * erroneously unmapped by the late stage unmapping implementation
+ * in an acpi_put_table() invoked during the late stage.
*
******************************************************************************/
acpi_status
-acpi_get_table_with_size(char *signature,
- u32 instance, struct acpi_table_header **out_table,
- acpi_size *tbl_size)
+acpi_get_table(char *signature,
+ u32 instance, struct acpi_table_header ** out_table)
{
u32 i;
u32 j;
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
+ struct acpi_table_desc *table_desc;
/* Parameter validation */
@@ -309,13 +314,22 @@ acpi_get_table_with_size(char *signature,
return (AE_BAD_PARAMETER);
}
+ /*
+ * Note that the following line is required by some OSPMs; they only
+ * check whether the returned table is NULL, instead of the returned
+ * status, to determine whether this function succeeded.
+ */
+ *out_table = NULL;
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
- (&(acpi_gbl_root_table_list.tables[i].signature),
- signature)) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+
+ if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
continue;
}
@@ -323,43 +337,65 @@ acpi_get_table_with_size(char *signature,
continue;
}
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
- if (ACPI_SUCCESS(status)) {
- *out_table = acpi_gbl_root_table_list.tables[i].pointer;
- *tbl_size = acpi_gbl_root_table_list.tables[i].length;
- }
-
- if (!acpi_gbl_permanent_mmap) {
- acpi_gbl_root_table_list.tables[i].pointer = NULL;
- }
-
- return (status);
+ status = acpi_tb_get_table(table_desc, out_table);
+ break;
}
- return (AE_NOT_FOUND);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return (status);
}
-ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+ACPI_EXPORT_SYMBOL(acpi_get_table)
-acpi_status
-acpi_get_table(char *signature,
- u32 instance, struct acpi_table_header **out_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_put_table
+ *
+ * PARAMETERS: table - The pointer to the table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release a table returned by acpi_get_table() and its clones.
+ * Note that it is not safe to invoke this function after the
+ * original table descriptor has been uninstalled. Currently
+ * no OSPM requires such situations to be handled.
+ *
+ ******************************************************************************/
+void acpi_put_table(struct acpi_table_header *table)
{
- acpi_size tbl_size;
+ u32 i;
+ struct acpi_table_desc *table_desc;
+
+ ACPI_FUNCTION_TRACE(acpi_put_table);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /* Walk the root table list */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
- return acpi_get_table_with_size(signature,
- instance, out_table, &tbl_size);
+ if (table_desc->pointer != table) {
+ continue;
+ }
+
+ acpi_tb_put_table(table_desc);
+ break;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_VOID;
}
-ACPI_EXPORT_SYMBOL(acpi_get_table)
+ACPI_EXPORT_SYMBOL(acpi_put_table)
/*******************************************************************************
*
* FUNCTION: acpi_get_table_by_index
*
* PARAMETERS: table_index - Table index
- * table - Where the pointer to the table is returned
+ * out_table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to the requested table
*
@@ -368,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
*
******************************************************************************/
acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table)
{
acpi_status status;
@@ -376,35 +412,33 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Parameter validation */
- if (!table) {
+ if (!out_table) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ /*
+ * Note that the following line is required by some OSPMs; they only
+ * check whether the returned table is NULL, instead of the returned
+ * status, to determine whether this function succeeded.
+ */
+ *out_table = NULL;
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Validate index */
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
}
- if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
-
- /* Table is not mapped, map it */
+ status =
+ acpi_tb_get_table(&acpi_gbl_root_table_list.tables[table_index],
+ out_table);
- status =
- acpi_tb_validate_table(&acpi_gbl_root_table_list.
- tables[table_index]);
- if (ACPI_FAILURE(status)) {
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(status);
- }
- }
-
- *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
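
Illustrative only, not part of the patch: the conversion pattern that the later hunks apply to the NFIT, SPCR, MADT and processor_core callers. An acpi_get_table() call is now balanced by acpi_put_table() in the same boot stage, and the table's own length field replaces the old tbl_size output parameter. The signature below is just an example.

	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_SPCR, 0, &table);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* ... parse the table, using table->length where tbl_size was used before ... */

	acpi_put_table(table); /* replaces early_acpi_os_unmap_memory() and pairs with the get */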
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6b81746cd13c..e0d2e6e6e40c 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -19,8 +19,17 @@
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
+#include <linux/iommu.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define IORT_TYPE_MASK(type) (1 << (type))
+#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
+#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
+ (1 << ACPI_IORT_NODE_SMMU_V3))
struct iort_its_msi_chip {
struct list_head list;
@@ -28,6 +37,90 @@ struct iort_its_msi_chip {
u32 translation_id;
};
+struct iort_fwnode {
+ struct list_head list;
+ struct acpi_iort_node *iort_node;
+ struct fwnode_handle *fwnode;
+};
+static LIST_HEAD(iort_fwnode_list);
+static DEFINE_SPINLOCK(iort_fwnode_lock);
+
+/**
+ * iort_set_fwnode() - Create iort_fwnode and use it to register
+ * iommu data in the iort_fwnode_list
+ *
+ * @iort_node: IORT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the IORT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
+ struct fwnode_handle *fwnode)
+{
+ struct iort_fwnode *np;
+
+ np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->iort_node = iort_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&iort_fwnode_lock);
+ list_add_tail(&np->list, &iort_fwnode_list);
+ spin_unlock(&iort_fwnode_lock);
+
+ return 0;
+}
+
+/**
+ * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
+ *
+ * @node: IORT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static inline
+struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
+{
+ struct iort_fwnode *curr;
+ struct fwnode_handle *fwnode = NULL;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry(curr, &iort_fwnode_list, list) {
+ if (curr->iort_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+
+ return fwnode;
+}
+
+/**
+ * iort_delete_fwnode() - Delete fwnode associated with an IORT node
+ *
+ * @node: IORT table node associated with fwnode to delete
+ */
+static inline void iort_delete_fwnode(struct acpi_iort_node *node)
+{
+ struct iort_fwnode *curr, *tmp;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
+ if (curr->iort_node == node) {
+ list_del(&curr->list);
+ kfree(curr);
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+}
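
Illustrative only: how these three helpers are tied together by iort_init_platform_devices() and iort_iommu_xlate() further down in this file; the error path is shown for completeness.

	struct fwnode_handle *fwnode;
	const struct iommu_ops *ops;

	/* at IORT scan time, for each SMMU/SMMUv3 node */
	fwnode = acpi_alloc_fwnode_static();
	iort_set_fwnode(iort_node, fwnode);     /* record the node -> fwnode binding */

	/* later, when configuring a device mastered through that SMMU */
	fwnode = iort_get_fwnode(iort_node);    /* look the fwnode back up */
	ops = iommu_get_instance(fwnode);       /* resolve the registered IOMMU ops */

	/* on error or tear-down */
	iort_delete_fwnode(iort_node);
	acpi_free_fwnode_static(fwnode);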
+
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
@@ -141,6 +234,21 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
return NULL;
}
+static acpi_status
+iort_match_type_callback(struct acpi_iort_node *node, void *context)
+{
+ return AE_OK;
+}
+
+bool iort_node_match(u8 type)
+{
+ struct acpi_iort_node *node;
+
+ node = iort_scan_node(type, iort_match_type_callback, NULL);
+
+ return node != NULL;
+}
+
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
@@ -212,9 +320,48 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return 0;
}
+static
+struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
+ u32 *id_out, u8 type_mask,
+ int index)
+{
+ struct acpi_iort_node *parent;
+ struct acpi_iort_id_mapping *map;
+
+ if (!node->mapping_offset || !node->mapping_count ||
+ index >= node->mapping_count)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->output_reference) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+
+ if (!(IORT_TYPE_MASK(parent->type) & type_mask))
+ return NULL;
+
+ if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
+ if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
+ node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ *id_out = map[index].output_base;
+ return parent;
+ }
+ }
+
+ return NULL;
+}
+
static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
u32 rid_in, u32 *rid_out,
- u8 type)
+ u8 type_mask)
{
u32 rid = rid_in;
@@ -223,7 +370,7 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
struct acpi_iort_id_mapping *map;
int i;
- if (node->type == type) {
+ if (IORT_TYPE_MASK(node->type) & type_mask) {
if (rid_out)
*rid_out = rid;
return node;
@@ -296,7 +443,7 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
if (!node)
return req_id;
- iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
+ iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
@@ -318,7 +465,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
+ node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -356,13 +503,459 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *rid = data;
+
+ *rid = alias;
+ return 0;
+}
+
+static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
+ struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ int ret = iommu_fwspec_init(dev, fwnode, ops);
+
+ if (!ret)
+ ret = iommu_fwspec_add_ids(dev, &streamid, 1);
+
+ return ret;
+}
+
+static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
+ struct acpi_iort_node *node,
+ u32 streamid)
+{
+ const struct iommu_ops *ops = NULL;
+ int ret = -ENODEV;
+ struct fwnode_handle *iort_fwnode;
+
+ if (node) {
+ iort_fwnode = iort_get_fwnode(node);
+ if (!iort_fwnode)
+ return NULL;
+
+ ops = iommu_get_instance(iort_fwnode);
+ if (!ops)
+ return NULL;
+
+ ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+ }
+
+ return ret ? NULL : ops;
+}
+
+/**
+ * iort_set_dma_mask - Set-up dma mask for a device.
+ *
+ * @dev: device to configure
+ */
+void iort_set_dma_mask(struct device *dev)
+{
+ /*
+ * Set default coherent_dma_mask to 32 bit. Drivers are expected to
+ * set up the correct supported mask.
+ */
+ if (!dev->coherent_dma_mask)
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ /*
+ * Set it to coherent_dma_mask by default if the architecture
+ * code has not set it.
+ */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+}
+
+/**
+ * iort_iommu_configure - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ *
+ * Returns: iommu_ops pointer on configuration success
+ * NULL on configuration failure
+ */
+const struct iommu_ops *iort_iommu_configure(struct device *dev)
+{
+ struct acpi_iort_node *node, *parent;
+ const struct iommu_ops *ops = NULL;
+ u32 streamid = 0;
+
+ if (dev_is_pci(dev)) {
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ u32 rid;
+
+ pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
+ &rid);
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+ iort_match_node_callback, &bus->dev);
+ if (!node)
+ return NULL;
+
+ parent = iort_node_map_rid(node, rid, &streamid,
+ IORT_IOMMU_TYPE);
+
+ ops = iort_iommu_xlate(dev, parent, streamid);
+
+ } else {
+ int i = 0;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return NULL;
+
+ parent = iort_node_get_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
+
+ while (parent) {
+ ops = iort_iommu_xlate(dev, parent, streamid);
+
+ parent = iort_node_get_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
+ }
+ }
+
+ return ops;
+}
+
+static void __init acpi_iort_register_irq(int hwirq, const char *name,
+ int trigger,
+ struct resource *res)
+{
+ int irq = acpi_register_gsi(NULL, hwirq, trigger,
+ ACPI_ACTIVE_HIGH);
+
+ if (irq <= 0) {
+ pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
+ name);
+ return;
+ }
+
+ res->start = irq;
+ res->end = irq;
+ res->flags = IORESOURCE_IRQ;
+ res->name = name;
+}
+
+static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+ /* Always present mem resource */
+ int num_res = 1;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ if (smmu->event_gsiv)
+ num_res++;
+
+ if (smmu->pri_gsiv)
+ num_res++;
+
+ if (smmu->gerr_gsiv)
+ num_res++;
+
+ if (smmu->sync_gsiv)
+ num_res++;
+
+ return num_res;
+}
+
+static void __init arm_smmu_v3_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+ int num_res = 0;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ res[num_res].start = smmu->base_address;
+ res[num_res].end = smmu->base_address + SZ_128K - 1;
+ res[num_res].flags = IORESOURCE_MEM;
+
+ num_res++;
+
+ if (smmu->event_gsiv)
+ acpi_iort_register_irq(smmu->event_gsiv, "eventq",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->pri_gsiv)
+ acpi_iort_register_irq(smmu->pri_gsiv, "priq",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->gerr_gsiv)
+ acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+
+ if (smmu->sync_gsiv)
+ acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
+ ACPI_EDGE_SENSITIVE,
+ &res[num_res++]);
+}
+
+static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ /* Retrieve SMMUv3 specific data */
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+}
+
+static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ /*
+ * Only consider the global fault interrupt and ignore the
+ * configuration access interrupt.
+ *
+ * MMIO address and global fault interrupt resources are always
+ * present so add them to the context interrupt count as a static
+ * value.
+ */
+ return smmu->context_interrupt_count + 2;
+}
+
+static void __init arm_smmu_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+ int i, hw_irq, trigger, num_res = 0;
+ u64 *ctx_irq, *glb_irq;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ res[num_res].start = smmu->base_address;
+ res[num_res].end = smmu->base_address + smmu->span - 1;
+ res[num_res].flags = IORESOURCE_MEM;
+ num_res++;
+
+ glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
+ /* Global IRQs */
+ hw_irq = IORT_IRQ_MASK(glb_irq[0]);
+ trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
+
+ acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
+ &res[num_res++]);
+
+ /* Context IRQs */
+ ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
+ for (i = 0; i < smmu->context_interrupt_count; i++) {
+ hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
+ trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
+
+ acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
+ &res[num_res++]);
+ }
+}
+
+static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu *smmu;
+
+ /* Retrieve SMMU specific data */
+ smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+}
+
+struct iort_iommu_config {
+ const char *name;
+ int (*iommu_init)(struct acpi_iort_node *node);
+ bool (*iommu_is_coherent)(struct acpi_iort_node *node);
+ int (*iommu_count_resources)(struct acpi_iort_node *node);
+ void (*iommu_init_resources)(struct resource *res,
+ struct acpi_iort_node *node);
+};
+
+static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
+ .name = "arm-smmu-v3",
+ .iommu_is_coherent = arm_smmu_v3_is_coherent,
+ .iommu_count_resources = arm_smmu_v3_count_resources,
+ .iommu_init_resources = arm_smmu_v3_init_resources
+};
+
+static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
+ .name = "arm-smmu",
+ .iommu_is_coherent = arm_smmu_is_coherent,
+ .iommu_count_resources = arm_smmu_count_resources,
+ .iommu_init_resources = arm_smmu_init_resources
+};
+
+static __init
+const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
+{
+ switch (node->type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ return &iort_arm_smmu_v3_cfg;
+ case ACPI_IORT_NODE_SMMU:
+ return &iort_arm_smmu_cfg;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
+ * @node: Pointer to SMMU ACPI IORT node
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
+{
+ struct fwnode_handle *fwnode;
+ struct platform_device *pdev;
+ struct resource *r;
+ enum dev_dma_attr attr;
+ int ret, count;
+ const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
+
+ if (!ops)
+ return -ENODEV;
+
+ pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM; /* platform_device_alloc() returns NULL on failure */
+
+ count = ops->iommu_count_resources(node);
+
+ r = kcalloc(count, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ goto dev_put;
+ }
+
+ ops->iommu_init_resources(r, node);
+
+ ret = platform_device_add_resources(pdev, r, count);
+ /*
+ * Resources are duplicated in platform_device_add_resources,
+ * free their allocated memory
+ */
+ kfree(r);
+
+ if (ret)
+ goto dev_put;
+
+ /*
+ * Add a copy of IORT node pointer to platform_data to
+ * be used to retrieve IORT data information.
+ */
+ ret = platform_device_add_data(pdev, &node, sizeof(node));
+ if (ret)
+ goto dev_put;
+
+ /*
+ * We expect the DMA masks to be equivalent for
+ * all SMMU set-ups
+ */
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ fwnode = iort_get_fwnode(node);
+
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto dev_put;
+ }
+
+ pdev->dev.fwnode = fwnode;
+
+ attr = ops->iommu_is_coherent(node) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(&pdev->dev, attr);
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto dma_deconfigure;
+
+ return 0;
+
+dma_deconfigure:
+ acpi_dma_deconfigure(&pdev->dev);
+dev_put:
+ platform_device_put(pdev);
+
+ return ret;
+}
+
+static void __init iort_init_platform_devices(void)
+{
+ struct acpi_iort_node *iort_node, *iort_end;
+ struct acpi_table_iort *iort;
+ struct fwnode_handle *fwnode;
+ int i, ret;
+
+ /*
+ * iort_table and iort both point to the start of the IORT table, but
+ * have different struct types
+ */
+ iort = (struct acpi_table_iort *)iort_table;
+
+ /* Get the first IORT node */
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (iort_node >= iort_end) {
+ pr_err("iort node pointer overflows, bad table\n");
+ return;
+ }
+
+ if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
+ (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
+
+ fwnode = acpi_alloc_fwnode_static();
+ if (!fwnode)
+ return;
+
+ iort_set_fwnode(iort_node, fwnode);
+
+ ret = iort_add_smmu_platform_device(iort_node);
+ if (ret) {
+ iort_delete_fwnode(iort_node);
+ acpi_free_fwnode_static(fwnode);
+ return;
+ }
+ }
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
+
void __init acpi_iort_init(void)
{
acpi_status status;
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- const char *msg = acpi_format_exception(status);
- pr_err("Failed to get table, %s\n", msg);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
}
+
+ iort_init_platform_devices();
+
+ acpi_probe_device_table(iort);
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 05fe9ebfb9b5..4ef1e4624b2b 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,7 +36,7 @@
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#endif
#include <linux/acpi.h>
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5cbefd7621f0..95855cb9d6fb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -974,7 +974,7 @@ void __init acpi_early_init(void)
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
- acpi_gbl_permanent_mmap = 1;
+ acpi_permanent_mmap = true;
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e0ea8f56d2bf..3ca0729f7e0e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -776,9 +776,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
init_waitqueue_head(&pcc_data.pcc_write_wait_q);
}
- /* Plug PSD data into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
@@ -789,10 +786,15 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
"acpi_cppc");
- if (ret)
+ if (ret) {
+ per_cpu(cpc_desc_ptr, pr->id) = NULL;
goto out_free;
+ }
kfree(output.pointer);
return 0;
@@ -826,6 +828,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
void __iomem *addr;
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
+ if (!cpc_ptr)
+ return;
/* Free all the mapped sys mem areas for this CPU */
for (i = 2; i < cpc_ptr->num_entries; i++) {
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 5ea5dc219f56..fb19e1cdb641 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
if (check_children && list_empty(&adev->children))
return -ENODEV;
- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ /*
+ * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+ * device ID, it is better to make it look less attractive here, so that
+ * the other device with the same _ADR value (that may not have a valid
+ * device ID) can be matched going forward. [This means a second spec
+ * violation in a row, so whatever we do here is best effort anyway.]
+ */
+ return sta_present && list_empty(&adev->pnp.ids) ?
+ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -227,8 +235,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
attr = acpi_get_dma_attr(acpi_dev);
if (attr != DEV_DMA_NOT_SUPPORTED)
- arch_setup_dma_ops(dev, 0, 0, NULL,
- attr == DEV_DMA_COHERENT);
+ acpi_dma_configure(dev, attr);
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1b41a2739dac..0c452265c111 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -37,6 +37,7 @@ void acpi_amba_init(void);
static inline void acpi_amba_init(void) {}
#endif
int acpi_sysfs_init(void);
+void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 312c4b4dc363..2f82b8eba360 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2806,12 +2806,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
- status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+ status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* This is ok, we could have an nvdimm hotplugged later */
dev_dbg(dev, "failed to find NFIT at startup\n");
return 0;
}
+ sz = tbl->length;
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
if (!acpi_desc)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index ce3a7a16f03f..edb0c79f7c64 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -70,7 +70,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node;
- if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
node = pxm_to_node_map[pxm];
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9a4c6abee63e..57fb5f468ac2 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -42,7 +42,7 @@
#include <linux/semaphore.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "internal.h"
@@ -76,6 +76,7 @@ static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
+bool acpi_permanent_mmap = false;
/*
* This list of permanent mappings is for memory that may be accessed from
@@ -306,7 +307,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* virtual address). If not found, map it, add it to that list and return a
* pointer to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
void __iomem *__ref
@@ -322,7 +323,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
return NULL;
}
- if (!acpi_gbl_permanent_mmap)
+ if (!acpi_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
mutex_lock(&acpi_ioremap_lock);
@@ -392,7 +393,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
* mappings, drop a reference to it and unmap it if there are no more active
* references to it.
*
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
* __acpi_unmap_table() is an __init function, the __ref annotation is needed
* here.
@@ -401,7 +402,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- if (!acpi_gbl_permanent_mmap) {
+ if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
return;
}
@@ -426,12 +427,6 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
-void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
-{
- if (!acpi_gbl_permanent_mmap)
- __acpi_unmap_table(virt, size);
-}
-
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index b5b376e081f5..a6a4ceaa6cc3 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
/* Structure to hold entries from the MCFG table */
struct mcfg_entry {
@@ -32,12 +33,166 @@ struct mcfg_entry {
u8 bus_end;
};
+#ifdef CONFIG_PCI_QUIRKS
+struct mcfg_fixup {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+ u16 segment;
+ struct resource bus_range;
+ struct pci_ecam_ops *ops;
+ struct resource cfgres;
+};
+
+#define MCFG_BUS_RANGE(start, end) DEFINE_RES_NAMED((start), \
+ ((end) - (start) + 1), \
+ NULL, IORESOURCE_BUS)
+#define MCFG_BUS_ANY MCFG_BUS_RANGE(0x0, 0xff)
+
+static struct mcfg_fixup mcfg_quirks[] = {
+/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+
+#define QCOM_ECAM32(seg) \
+ { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+ QCOM_ECAM32(0),
+ QCOM_ECAM32(1),
+ QCOM_ECAM32(2),
+ QCOM_ECAM32(3),
+ QCOM_ECAM32(4),
+ QCOM_ECAM32(5),
+ QCOM_ECAM32(6),
+ QCOM_ECAM32(7),
+
+#define HISI_QUAD_DOM(table_id, seg, ops) \
+ { "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
+ { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+ HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 4, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 8, &hisi_pcie_ops),
+ HISI_QUAD_DOM("HIP07 ", 12, &hisi_pcie_ops),
+
+#define THUNDER_PEM_RES(addr, node) \
+ DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+#define THUNDER_PEM_QUIRK(rev, node) \
+ { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 5 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x884057000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 6 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88808f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 7 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89001f000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 8 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
+ { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
+ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
+ /* SoC pass2.x */
+ THUNDER_PEM_QUIRK(1, 0),
+ THUNDER_PEM_QUIRK(1, 1),
+
+#define THUNDER_ECAM_QUIRK(rev, seg) \
+ { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \
+ &pci_thunder_ecam_ops }
+ /* SoC pass1.x */
+ THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */
+ THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */
+ THUNDER_ECAM_QUIRK(2, 0),
+ THUNDER_ECAM_QUIRK(2, 1),
+ THUNDER_ECAM_QUIRK(2, 2),
+ THUNDER_ECAM_QUIRK(2, 3),
+ THUNDER_ECAM_QUIRK(2, 10),
+ THUNDER_ECAM_QUIRK(2, 11),
+ THUNDER_ECAM_QUIRK(2, 12),
+ THUNDER_ECAM_QUIRK(2, 13),
+
+#define XGENE_V1_ECAM_MCFG(rev, seg) \
+ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
+ &xgene_v1_pcie_ecam_ops }
+#define XGENE_V2_ECAM_MCFG(rev, seg) \
+ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
+ &xgene_v2_pcie_ecam_ops }
+ /* X-Gene SoC with v1 PCIe controller */
+ XGENE_V1_ECAM_MCFG(1, 0),
+ XGENE_V1_ECAM_MCFG(1, 1),
+ XGENE_V1_ECAM_MCFG(1, 2),
+ XGENE_V1_ECAM_MCFG(1, 3),
+ XGENE_V1_ECAM_MCFG(1, 4),
+ XGENE_V1_ECAM_MCFG(2, 0),
+ XGENE_V1_ECAM_MCFG(2, 1),
+ XGENE_V1_ECAM_MCFG(2, 2),
+ XGENE_V1_ECAM_MCFG(2, 3),
+ XGENE_V1_ECAM_MCFG(2, 4),
+ /* X-Gene SoC with v2.1 PCIe controller */
+ XGENE_V2_ECAM_MCFG(3, 0),
+ XGENE_V2_ECAM_MCFG(3, 1),
+ /* X-Gene SoC with v2.2 PCIe controller */
+ XGENE_V2_ECAM_MCFG(4, 0),
+ XGENE_V2_ECAM_MCFG(4, 1),
+ XGENE_V2_ECAM_MCFG(4, 2),
+};
+
+static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
+static char mcfg_oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+static u32 mcfg_oem_revision;
+
+static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
+ struct resource *bus_range)
+{
+ if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(f->oem_table_id, mcfg_oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE) &&
+ f->oem_revision == mcfg_oem_revision &&
+ f->segment == segment &&
+ resource_contains(&f->bus_range, bus_range))
+ return 1;
+
+ return 0;
+}
+#endif
+
+static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
+ struct resource *cfgres,
+ struct pci_ecam_ops **ecam_ops)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ u16 segment = root->segment;
+ struct resource *bus_range = &root->secondary;
+ struct mcfg_fixup *f;
+ int i;
+
+ for (i = 0, f = mcfg_quirks; i < ARRAY_SIZE(mcfg_quirks); i++, f++) {
+ if (pci_mcfg_quirk_matches(f, segment, bus_range)) {
+ if (f->cfgres.start)
+ *cfgres = f->cfgres;
+ if (f->ops)
+ *ecam_ops = f->ops;
+ dev_info(&root->device->dev, "MCFG quirk: ECAM at %pR for %pR with %ps\n",
+ cfgres, bus_range, *ecam_ops);
+ return;
+ }
+ }
+#endif
+}
+
/* List to save MCFG entries */
static LIST_HEAD(pci_mcfg_list);
-phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
+ struct pci_ecam_ops **ecam_ops)
{
+ struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
struct mcfg_entry *e;
+ struct resource res;
+
+ /* Use the address from _CBA if present, otherwise look up MCFG */
+ if (root->mcfg_addr)
+ goto skip_lookup;
/*
* We expect exact match, unless MCFG entry end bus covers more than
@@ -45,10 +200,32 @@ phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
*/
list_for_each_entry(e, &pci_mcfg_list, list) {
if (e->segment == seg && e->bus_start == bus_res->start &&
- e->bus_end >= bus_res->end)
- return e->addr;
+ e->bus_end >= bus_res->end) {
+ root->mcfg_addr = e->addr;
+ }
+
+ }
+
+skip_lookup:
+ memset(&res, 0, sizeof(res));
+ if (root->mcfg_addr) {
+ res.start = root->mcfg_addr + (bus_res->start << 20);
+ res.end = res.start + (resource_size(bus_res) << 20) - 1;
+ res.flags = IORESOURCE_MEM;
}
+ /*
+ * Allow quirks to override default ECAM ops and CFG resource
+ * range. This may even fabricate a CFG resource range in case
+ * MCFG does not have it. An invalid CFG start address indicates
+ * either an MCFG firmware bug or that another quirk is needed in
+ * the array.
+ */
+ pci_mcfg_apply_quirks(root, &res, &ops);
+ if (!res.start)
+ return -ENXIO;
+
+ *cfgres = res;
+ *ecam_ops = ops;
return 0;
}
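
Illustrative only: what an additional mcfg_quirks[] entry would look like. The OEM strings, revision, segment, bus range and config window below are entirely hypothetical; real entries must match the platform's MCFG header fields exactly, including trailing spaces in the OEM IDs.

	/* hypothetical platform, not a real quirk */
	{ "VENDOR", "BOARD   ", 1, 0, MCFG_BUS_RANGE(0x00, 0x1f),
	  &pci_generic_ecam_ops, DEFINE_RES_MEM(0x40000000, 0x02000000) },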
@@ -79,6 +256,13 @@ static __init int pci_mcfg_parse(struct acpi_table_header *header)
list_add(&e->list, &pci_mcfg_list);
}
+#ifdef CONFIG_PCI_QUIRKS
+ /* Save MCFG IDs and revision for quirks matching */
+ memcpy(mcfg_oem_id, header->oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(mcfg_oem_table_id, header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ mcfg_oem_revision = header->oem_revision;
+#endif
+
pr_info("MCFG table detected, %d entries\n", n);
return 0;
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 2a358154b770..a34669cc823b 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -4,7 +4,7 @@
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sleep.h"
#include "internal.h"
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5c78ee1860b0..611a5585a902 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -154,18 +154,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
{
struct acpi_table_madt *madt = NULL;
- acpi_size tbl_size;
phys_cpuid_t rv;
- acpi_get_table_with_size(ACPI_SIG_MADT, 0,
- (struct acpi_table_header **)&madt,
- &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
if (!madt)
return PHYS_CPUID_INVALID;
rv = map_madt_entry(madt, 1, acpi_id, true);
- early_acpi_os_unmap_memory(madt, tbl_size);
+ acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1fed84a092c2..59c3a5d1e600 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -28,7 +28,7 @@
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d51ca1c05619..a12f96cc93ff 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -31,7 +31,7 @@
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PREFIX "ACPI: "
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 56241eb341f4..cb57962ef7c4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -664,3 +664,60 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+
+static int acpi_dev_consumes_res(struct acpi_device *adev, struct resource *res)
+{
+ struct list_head resource_list;
+ struct resource_entry *rentry;
+ int ret, found = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (ret < 0)
+ return 0;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ if (resource_contains(rentry->res, res)) {
+ found = 1;
+ break;
+ }
+
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+ return found;
+}
+
+static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
+ void *context, void **ret)
+{
+ struct resource *res = context;
+ struct acpi_device **consumer = (struct acpi_device **) ret;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (acpi_dev_consumes_res(adev, res)) {
+ *consumer = adev;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_resource_consumer - Find the ACPI device that consumes @res.
+ * @res: Resource to search for.
+ *
+ * Search the current resource settings (_CRS) of every ACPI device node
+ * for @res. If we find an ACPI device whose _CRS includes @res, return
+ * it. Otherwise, return NULL.
+ */
+struct acpi_device *acpi_resource_consumer(struct resource *res)
+{
+ struct acpi_device *consumer = NULL;
+
+ acpi_get_devices(NULL, acpi_res_consumer_cb, res, (void **) &consumer);
+ return consumer;
+}
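
Illustrative only: a minimal sketch of how a caller might map a claimed MMIO window back to the ACPI device whose _CRS describes it; the address range is a placeholder.

	struct resource res = DEFINE_RES_MEM(0xfed40000, 0x5000); /* placeholder range */
	struct acpi_device *adev;

	adev = acpi_resource_consumer(&res);
	if (adev)
		dev_info(&adev->dev, "consumes %pR\n", &res);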
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3d1856f1f4d0..192691880d55 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
@@ -1119,9 +1120,6 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
- if (!acpi_has_method(handle, "_BQC"))
- printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
- "cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
@@ -1370,6 +1368,38 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NON_COHERENT;
}
+/**
+ * acpi_dma_configure - Set-up DMA configuration for the device.
+ * @dev: The pointer to the device
+ * @attr: device dma attributes
+ */
+void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+{
+ const struct iommu_ops *iommu;
+
+ iort_set_dma_mask(dev);
+
+ iommu = iort_iommu_configure(dev);
+
+ /*
+ * Assume the DMA valid range starts at 0 and covers the whole
+ * coherent_dma_mask.
+ */
+ arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
+ attr == DEV_DMA_COHERENT);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_configure);
+
+/**
+ * acpi_dma_deconfigure - Tear-down DMA configuration for the device.
+ * @dev: The pointer to the device
+ */
+void acpi_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
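
Illustrative only: the pairing a bus layer is expected to follow with the two new helpers, mirroring the glue.c and IORT hunks earlier in this patch; adev is assumed to be the device's ACPI companion.

	enum dev_dma_attr attr = acpi_get_dma_attr(adev);

	if (attr != DEV_DMA_NOT_SUPPORTED)
		acpi_dma_configure(dev, attr); /* sets DMA masks and wires up IOMMU ops via IORT */

	/* ... device bound and used ... */

	acpi_dma_deconfigure(dev); /* tears down the arch DMA ops on unbind/removal */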
+
static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
@@ -2044,6 +2074,7 @@ int __init acpi_scan_init(void)
}
}
+ acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
acpi_ec_ecdt_start();
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index e8d7bc7d4da8..b8019c4c1d38 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -33,7 +33,6 @@ int __init parse_spcr(bool earlycon)
{
static char opts[64];
struct acpi_table_spcr *table;
- acpi_size table_size;
acpi_status status;
char *uart;
char *iotype;
@@ -43,9 +42,8 @@ int __init parse_spcr(bool earlycon)
if (acpi_disabled)
return -ENODEV;
- status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
- (struct acpi_table_header **)&table,
- &table_size);
+ status = acpi_get_table(ACPI_SIG_SPCR, 0,
+ (struct acpi_table_header **)&table);
if (ACPI_FAILURE(status))
return -ENOENT;
@@ -106,6 +104,6 @@ int __init parse_spcr(bool earlycon)
err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
done:
- early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+ acpi_put_table((struct acpi_table_header *)table);
return err;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 703c26e7022c..cf05ae973381 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -708,6 +708,62 @@ end:
return result ? result : size;
}
+/*
+ * A Quirk Mechanism for GPE Flooding Prevention:
+ *
+ * Quirks may be needed to prevent GPE flooding on a specific GPE. The
+ * flooding typically cannot be detected and automatically prevented by
+ * the ACPI_GPE_DISPATCH_NONE check because a _Lxx/_Exx handler is
+ * prepared in the AML tables. This normally indicates a feature gap in
+ * Linux, so instead of providing endless quirk tables, we provide a
+ * boot parameter for those who want this quirk. For example, to prevent
+ * GPE flooding for GPE 0x00, specify the following boot parameter:
+ * acpi_mask_gpe=0x00
+ * The masking status can be modified via the following runtime control
+ * interface:
+ * echo unmask > /sys/firmware/acpi/interrupts/gpe00
+ */
+
+/*
+ * Currently, the GPE flooding prevention only supports masking GPEs
+ * numbered from 0x00 to 0x7f.
+ */
+#define ACPI_MASKABLE_GPE_MAX 0x80
+
+static u64 __initdata acpi_masked_gpes;
+
+static int __init acpi_gpe_set_masked_gpes(char *val)
+{
+ u8 gpe;
+
+ if (kstrtou8(val, 0, &gpe) || gpe >= ACPI_MASKABLE_GPE_MAX)
+ return -EINVAL;
+ acpi_masked_gpes |= ((u64)1<<gpe);
+
+ return 1;
+}
+__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
+
+void __init acpi_gpe_apply_masked_gpes(void)
+{
+ acpi_handle handle;
+ acpi_status status;
+ u8 gpe;
+
+ for (gpe = 0;
+ gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
+ gpe++) {
+ if (acpi_masked_gpes & ((u64)1<<gpe)) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
+ }
+ }
+ }
+}
+
void acpi_irq_stats_init(void)
{
acpi_status status;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index cdd56c4657e0..2604189d6cd1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -333,7 +333,6 @@ acpi_table_parse_entries_array(char *id,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
- acpi_size tbl_size;
int count;
u32 instance = 0;
@@ -346,7 +345,7 @@ acpi_table_parse_entries_array(char *id,
if (!strncmp(id, ACPI_SIG_MADT, 4))
instance = acpi_apic_instance;
- acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
+ acpi_get_table(id, instance, &table_header);
if (!table_header) {
pr_warn("%4.4s not present\n", id);
return -ENODEV;
@@ -355,7 +354,7 @@ acpi_table_parse_entries_array(char *id,
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
- early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+ acpi_put_table(table_header);
return count;
}
@@ -397,7 +396,6 @@ acpi_table_parse_madt(enum acpi_madt_type id,
int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
if (acpi_disabled)
return -ENODEV;
@@ -406,13 +404,13 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
return -EINVAL;
if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
- acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
+ acpi_get_table(id, acpi_apic_instance, &table);
else
- acpi_get_table_with_size(id, 0, &table, &tbl_size);
+ acpi_get_table(id, 0, &table);
if (table) {
handler(table);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
return 0;
} else
return -ENODEV;
@@ -426,16 +424,15 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
static void __init check_multiple_madt(void)
{
struct acpi_table_header *table = NULL;
- acpi_size tbl_size;
- acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
+ acpi_get_table(ACPI_SIG_MADT, 2, &table);
if (table) {
pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
acpi_apic_instance);
pr_warn("If \"acpi_apic_instance=%d\" works better, "
"notify linux-acpi@vger.kernel.org\n",
acpi_apic_instance ? 0 : 2);
- early_acpi_os_unmap_memory(table, tbl_size);
+ acpi_put_table(table);
} else
acpi_apic_instance = 0;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 35e8fbca10ad..1d0417b87cb7 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -40,7 +40,7 @@
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/workqueue.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PREFIX "ACPI: "
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index f9b983ae6877..1fd25e872ece 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 480fa6ffbc09..3ef6253e1cce 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -10,7 +10,7 @@
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 40c2d561417b..c53a9dd1353f 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 85aaf2222587..80c2ddcfa92c 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -52,7 +52,7 @@
#include <asm/string.h>
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
#include "firestream.h"
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 81aaa505862c..637c3e6b0f9e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -43,7 +43,7 @@
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#ifdef CONFIG_SBUS
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 31b513a23ae0..3617659b9184 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -71,7 +71,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 5fc81e240c24..584aa881882b 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -45,7 +45,7 @@
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index feb023d7eebd..082aa02abc57 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -17,7 +17,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/param.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "idt77105.h"
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 074616b39f4d..471ddfd93ea8 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -45,7 +45,7 @@
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index b2756765950e..8640bafeb471 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -58,7 +58,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index c7296b583787..cb28579e8a94 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -50,7 +50,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/etherdevice.h>
#include "nicstar.h"
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 02159345566c..b0363149b2fd 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -23,7 +23,7 @@
#include <linux/atm_suni.h>
#include <linux/slab.h>
#include <asm/param.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include "suni.h"
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index 5120a96b3a89..4fa13a807873 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -10,7 +10,7 @@
#include <linux/sonet.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include "uPD98402.h"
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3dc95484161..292dec18ffb8 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -27,7 +27,7 @@
#include <asm/string.h>
#include <asm/io.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "uPD98401.h"
#include "uPD98402.h"
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 4ef4c5caed4f..8a8e403644d6 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -132,9 +132,9 @@ config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && OF && I2C && INPUT
select FB_SYS_FOPS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
help
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ada9dce34e6d..e19b1008e5fb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
-extern struct device_attribute dev_attr_deferred_probe;
-
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 1e3903d0d994..eb3af2739537 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -363,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev, \
return sprintf(buf, "%u\n", this_leaf->object); \
}
+show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
@@ -444,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
return n;
}
+static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
@@ -457,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
@@ -480,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 020ea7f05520..8c25e68e67d7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
- error = device_create_file(dev, &dev_attr_deferred_probe);
- if (error)
- goto err_remove_online;
-
return 0;
- err_remove_online:
- device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
- device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a8b258e5407b..a1fbf55c4d3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-static ssize_t deferred_probe_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- bool value;
-
- mutex_lock(&deferred_probe_mutex);
- value = !list_empty(&dev->p->deferred_probe);
- mutex_unlock(&deferred_probe_mutex);
-
- return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
* to prohibit probing of devices as it could be unsafe.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bb69e58c29f3..8ab8ea1253e6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5711708532db..2997026b4dfb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -544,6 +544,7 @@ static int genpd_runtime_suspend(struct device *dev)
return -EBUSY;
/* Measure suspend latency. */
+ time_start = 0;
if (runtime_pm)
time_start = ktime_get();
@@ -625,6 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
+ time_start = 0;
if (timed && runtime_pm)
time_start = ktime_get();
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 48c6294e9c34..249e0304597f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -194,7 +194,7 @@ void device_pm_move_last(struct device *dev)
static ktime_t initcall_debug_start(struct device *dev)
{
- ktime_t calltime = ktime_set(0, 0);
+ ktime_t calltime = 0;
if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bf9ba26981a5..f546f8f107b0 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -998,14 +998,14 @@ static int print_wakeup_source_stats(struct seq_file *m,
active_time = ktime_sub(now, ws->last_time);
total_time = ktime_add(total_time, active_time);
- if (active_time.tv64 > max_time.tv64)
+ if (active_time > max_time)
max_time = active_time;
if (ws->autosleep_enabled)
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(now, ws->start_prevent_time));
} else {
- active_time = ktime_set(0, 0);
+ active_time = 0;
}
seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 0809cda93cc0..26a51be77227 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -48,7 +48,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "DAC960.h"
#define DAC960_GAM_MINOR 252
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 5fd50a284168..a328f673adfe 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -70,7 +70,7 @@
#include <linux/platform_device.h>
#include <asm/setup.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ad793f35632c..3adc32a3153b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -23,7 +23,7 @@
#include <linux/pfn_t.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index db9d6bb6352d..e5c5b8eb14a9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -43,7 +43,7 @@
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 3d31761c0ed0..74e03aa537ad 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -26,7 +26,7 @@
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "loop.h"
MODULE_LICENSE("GPL");
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 3abb121825bc..a9b48ed7a3cd 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -45,7 +45,7 @@
#define REALLY_SLOW_IO
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef __arm__
#undef HD_IRQ
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4af818766797..f347285c67ec 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,7 @@
#include <linux/uio.h>
#include "loop.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 99c84468f154..50a2020b5b72 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -36,7 +36,7 @@
#include <linux/debugfs.h>
#include <linux/blk-mq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
@@ -1042,6 +1042,7 @@ static int __init nbd_init(void)
return -ENOMEM;
for (i = 0; i < nbds_max; i++) {
+ struct request_queue *q;
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
goto out;
@@ -1067,12 +1068,13 @@ static int __init nbd_init(void)
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (!disk->queue) {
+ q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+ if (IS_ERR(q)) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
+ disk->queue = q;
/*
* Tell the block layer that we are not a rotational device
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 4943ee22716e..c0e14e54909b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -257,7 +257,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- ktime_t kt = ktime_set(0, completion_nsec);
+ ktime_t kt = completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 93362362aa55..5fd2d0e25567 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -139,7 +139,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static DEFINE_MUTEX(pcd_mutex);
static DEFINE_SPINLOCK(pcd_lock);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 78a39f736c64..c3ed2fc72daa 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -155,7 +155,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/workqueue.h>
static DEFINE_MUTEX(pd_mutex);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7a7d977a76c5..ed93e8badf56 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -155,7 +155,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static DEFINE_MUTEX(pf_mutex);
static DEFINE_SPINLOCK(pf_spin_lock);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index bfbd4c852dd9..5db955fe3a94 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -166,7 +166,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/mutex.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
module_param(verbose, int, 0644);
module_param(major, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 216a94fed5b4..61fc6824299a 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -150,7 +150,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
module_param(verbose, int, 0);
module_param(major, int, 0);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 95c98de92971..1b94c1ca5c5f 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -68,7 +68,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRIVER_NAME "pktcdvd"
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7b274ff4632c..36d2b9f4e836 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3756,7 +3756,7 @@ static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
struct rbd_device *rbd_dev = arg;
void *p = data;
void *const end = p + data_len;
- u8 struct_v;
+ u8 struct_v = 0;
u32 len;
u32 notify_op;
int ret;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c264f2d284a7..aabd8e9d3035 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -34,7 +34,7 @@
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ba4bfe933276..0e93ad7b8511 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -29,7 +29,7 @@
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#if 0
#define CARM_DEBUG
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 46f4c719fed9..c141cc3be22b 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -54,7 +54,7 @@
#include "umem.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#define MM_MAXCARDS 4
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a679abd8..10332c24f961 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
u8 status;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
struct scatterlist sg[];
};
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
}
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_put_disk;
- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+ q = blk_mq_init_queue(&vblk->tag_set);
if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
+ vblk->disk->queue = q;
q->queuedata = vblk;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 15f58ab44d0b..e5ab7d9e8c45 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,6 +25,7 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
return bvec->bv_len != PAGE_SIZE;
}
+static void zram_revalidate_disk(struct zram *zram)
+{
+ revalidate_disk(zram->disk);
+ /* revalidate_disk resets BDI_CAP_STABLE_WRITES, so set it again */
+ zram->disk->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+}
+
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_revalidate_disk(zram);
up_write(&zram->init_lock);
- /*
- * Revalidate disk out of the init_lock to avoid lockdep splat.
- * It's okay because disk's capacity is protected by init_lock
- * so that revalidate_disk always sees up-to-date capacity.
- */
- revalidate_disk(zram->disk);
-
return len;
out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk(zram->disk);
+ zram_revalidate_disk(zram);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b1fc29a697b7..80627187c8b6 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -40,5 +40,3 @@ hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
hci_uart-objs := $(hci_uart-y)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index c4a75a18dcae..233e850fdac7 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -181,7 +181,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
if (!skb)
return -ENOMEM;
- if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+ if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
kfree_skb(skb);
return -EFAULT;
}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 78751057164a..b9e8cfc93c7e 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,13 @@ config TEGRA_ACONNECT
Driver for the Tegra ACONNECT bus which is used to interface with
the devices inside the Audio Processing Engine (APE) for Tegra210.
+config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+ depends on ARCH_TEGRA
+ help
+ Driver for the Tegra Generic Memory Interface bus which can be used
+ to attach devices such as NOR flash, UARTs and FPGAs.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
@@ -167,4 +174,13 @@ config VEXPRESS_CONFIG
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. It allows adjusting the priorities of all master
+ peripherals.
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b2606e..cc6364bec054 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -19,5 +19,8 @@ obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 890082315054..c49da15d9790 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1796,7 +1796,7 @@ static int __init cci_platform_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
- "AP_PERF_ARM_CCI_ONLINE", NULL,
+ "perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
if (ret)
return ret;
@@ -2190,6 +2190,9 @@ static int cci_probe_ports(struct device_node *np)
if (!of_match_node(arm_cci_ctrl_if_matches, cp))
continue;
+ if (!of_device_is_available(cp))
+ continue;
+
i = nb_ace + nb_ace_lite;
if (i >= nb_cci_ports)
@@ -2232,6 +2235,13 @@ static int cci_probe_ports(struct device_node *np)
ports[i].dn = cp;
}
+ /*
+ * If there is no CCI port that is under kernel control,
+ * return early and report the probe status.
+ */
+ if (!nb_ace && !nb_ace_lite)
+ return -ENODEV;
+
/* initialize a stashed array of ACE ports to speed-up look-up */
cci_ace_init_ports();
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index d1074d9b38ba..4d6a2b7e4d3f 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1562,7 +1562,7 @@ static int __init arm_ccn_init(void)
int i, ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
- "AP_PERF_ARM_CCN_ONLINE", NULL,
+ "perf/arm/ccn:online", NULL,
arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
@@ -1570,7 +1570,10 @@ static int __init arm_ccn_init(void)
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
- return platform_driver_register(&arm_ccn_driver);
+ ret = platform_driver_register(&arm_ccn_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+ return ret;
}
static void __exit arm_ccn_exit(void)
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 000000000000..063397f2c0db
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,267 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as they deal
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the displayed image being
+ * incorrect and the LCDC drm driver emitting the following warning:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priorities defined for this board\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 000000000000..a6570789f7af
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG 0x00
+#define TEGRA_GMI_CONFIG_GO BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
+#define TEGRA_GMI_MUX_MODE BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
+#define TEGRA_GMI_CS_SELECT(x) ((x & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0 0x10
+#define TEGRA_GMI_MUXED_WIDTH(x) ((x & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x) ((x & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x) ((x & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x) (x & 0xf)
+
+#define TEGRA_GMI_TIMING1 0x14
+#define TEGRA_GMI_WE_WIDTH(x) ((x & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x) ((x & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x) (x & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT 8
+
+struct tegra_gmi {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 snor_config;
+ u32 snor_timing0;
+ u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(gmi->rst);
+ usleep_range(2000, 4000);
+ reset_control_deassert(gmi->rst);
+
+ writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+ writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+ gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+ writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+ return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+ u32 config;
+
+ /* stop GMI operation */
+ config = readl(gmi->base + TEGRA_GMI_CONFIG);
+ config &= ~TEGRA_GMI_CONFIG_GO;
+ writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+ reset_control_assert(gmi->rst);
+ clk_disable_unprepare(gmi->clk);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+ struct device_node *child;
+ u32 property, ranges[4];
+ int err;
+
+ child = of_get_next_available_child(gmi->dev->of_node, NULL);
+ if (!child) {
+ dev_err(gmi->dev, "no child nodes found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently only support one child device due to the lack of
+ * chip-select address decoding, which means that we only have one
+ * chip-select line from the GMI controller.
+ */
+ if (of_get_child_count(gmi->dev->of_node) > 1)
+ dev_warn(gmi->dev, "only one child device is supported.");
+
+ if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+ gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+ if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+ gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+ gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+ gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+ gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+ gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+ gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+ /* Decode the CS# */
+ err = of_property_read_u32_array(child, "ranges", ranges, 4);
+ if (err < 0) {
+ /* Invalid binding */
+ if (err == -EOVERFLOW) {
+ dev_err(gmi->dev,
+ "failed to decode CS: invalid ranges length\n");
+ goto error_cs;
+ }
+
+ /*
+ * If we reach here it means that the child node has an empty
+ * "ranges" property or none at all. Attempt to decode the
+ * CS# from the reg property instead.
+ */
+ err = of_property_read_u32(child, "reg", &property);
+ if (err < 0) {
+ dev_err(gmi->dev,
+ "failed to decode CS: no reg property found\n");
+ goto error_cs;
+ }
+ } else {
+ property = ranges[1];
+ }
+
+ /* Valid chip selects are CS0-CS7 */
+ if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+ dev_err(gmi->dev, "invalid chip select: %d", property);
+ err = -EINVAL;
+ goto error_cs;
+ }
+
+ gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+ /* The default values that are provided below are reset values */
+ if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+ if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+ of_node_put(child);
+ return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_gmi *gmi;
+ struct resource *res;
+ int err;
+
+ gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+ if (!gmi)
+ return -ENOMEM;
+
+ gmi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gmi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gmi->base))
+ return PTR_ERR(gmi->base);
+
+ gmi->clk = devm_clk_get(dev, "gmi");
+ if (IS_ERR(gmi->clk)) {
+ dev_err(dev, "can not get clock\n");
+ return PTR_ERR(gmi->clk);
+ }
+
+ gmi->rst = devm_reset_control_get(dev, "gmi");
+ if (IS_ERR(gmi->rst)) {
+ dev_err(dev, "can not get reset\n");
+ return PTR_ERR(gmi->rst);
+ }
+
+ err = tegra_gmi_parse_dt(gmi);
+ if (err)
+ return err;
+
+ err = tegra_gmi_enable(gmi);
+ if (err < 0)
+ return err;
+
+ err = of_platform_default_populate(dev->of_node, NULL, dev);
+ if (err < 0) {
+ dev_err(dev, "fail to create devices.\n");
+ tegra_gmi_disable(gmi);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, gmi);
+
+ return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+ struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(gmi->dev);
+ tegra_gmi_disable(gmi);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+ { .compatible = "nvidia,tegra20-gmi", },
+ { .compatible = "nvidia,tegra30-gmi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+ .probe = tegra_gmi_probe,
+ .remove = tegra_gmi_remove,
+ .driver = {
+ .name = "tegra-gmi",
+ .of_match_table = tegra_gmi_id_table,
+ },
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 9efdf1de4035..493e7b9fc813 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
{
struct device_node *bridge;
struct device *parent;
+ int ret;
bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (!bridge)
@@ -182,7 +183,11 @@ static int vexpress_config_populate(struct device_node *node)
if (WARN_ON(!parent))
return -ENODEV;
- return of_platform_populate(node, NULL, NULL, parent);
+ ret = of_platform_populate(node, NULL, NULL, parent);
+
+ put_device(parent);
+
+ return ret;
}
static int __init vexpress_config_init(void)
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d475b3a0b2e..59cca72647a6 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -282,7 +282,7 @@
#include <linux/blkdev.h>
#include <linux/times.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 199b8e99f7d7..737187865269 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
unsigned long pa;
struct page *page;
- dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
- + agp->aperture.bus_base;
+ dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index a48e05b31593..2053f70ef66b 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -31,7 +31,7 @@
#include <linux/fs.h>
#include <linux/agpgart.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "agp.h"
#include "compat_ioctl.h"
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 0f64d149c98d..f6955888e676 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -38,7 +38,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include "agp.h"
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 14790304b84b..e5c62dcf2c11 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -34,7 +34,7 @@
#include <linux/fs.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "applicom.h"
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index 35d46da754d7..0584025bb0c2 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -20,7 +20,7 @@
#include <asm/blackfin.h>
#include <asm/bfrom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
#define stampit() stamp("here i am")
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 0fae5296e311..eb53cbadb68f 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -13,7 +13,7 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/therm.h>
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 65a8d96c0e93..58471394beb9 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -59,7 +59,7 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
-#include <asm/uaccess.h> /* for get_user, etc. */
+#include <linux/uaccess.h> /* for get_user, etc. */
#include <linux/wait.h> /* for wait_queue */
#include <linux/init.h> /* for __init, module_{init,exit} */
#include <linux/poll.h> /* for POLLIN, etc. */
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index 073db9558379..14e728fbb8a0 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index a7c5c59675f0..4f337375252e 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -46,7 +46,7 @@
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index be54e5331a45..20b32bb8c2af 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -69,9 +69,9 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
{
- return (cycle_t)read_counter((void __iomem *)hpet_mctr);
+ return (u64)read_counter((void __iomem *)hpet_mctr);
}
static struct clocksource clocksource_hpet = {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5136a7..ceff2fc524b1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -168,7 +168,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 0fcc9e69a346..661c82cde0f2 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
return 0;
}
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+ writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
static int atmel_trng_probe(struct platform_device *pdev)
{
struct atmel_trng *trng;
@@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (ret)
return ret;
- writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+ atmel_trng_enable(trng);
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
@@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
return 0;
err_register:
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return ret;
}
@@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
hwrng_unregister(&trng->rng);
- writel(TRNG_KEY, trng->base + TRNG_CR);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ atmel_trng_disable(trng);
clk_disable_unprepare(trng->clk);
return 0;
@@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev)
static int atmel_trng_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(trng->clk);
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ atmel_trng_enable(trng);
+
+ return 0;
}
static const struct dev_pm_ops atmel_trng_pm_ops = {
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89de5b4..6ce5ce8be2f2 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -43,7 +43,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define RNG_MODULE_NAME "hw_random"
@@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng)
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
+ memset(rng_buffer, 0, size);
}
static inline void cleanup_rng(struct kref *kref)
@@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
+ memset(rng_buffer, 0, rng_buffer_size());
return ret ? : err;
out_unlock_reading:
@@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused)
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
+ memset(rng_fillbuf, 0, rng_buffer_size());
}
hwrng_fill = NULL;
return 0;
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 58bef39f7286..119d698439ae 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = {
{ .compatible = "amlogic,meson-rng", },
{},
};
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
static struct platform_driver meson_rng_driver = {
.probe = meson_rng_probe,
@@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = {
module_platform_driver(meson_rng_driver);
-MODULE_ALIAS("platform:meson-rng");
MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986402eb..841fee845ec9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
- /* no room for word data */
- if (maxsize < WORD_SZ)
- return 0;
-
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f5c26a5f6875..3ad86fdf954e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <asm/io.h>
@@ -63,10 +64,13 @@
#define OMAP2_RNG_OUTPUT_SIZE 0x4
#define OMAP4_RNG_OUTPUT_SIZE 0x8
+#define EIP76_RNG_OUTPUT_SIZE 0x10
enum {
- RNG_OUTPUT_L_REG = 0,
- RNG_OUTPUT_H_REG,
+ RNG_OUTPUT_0_REG = 0,
+ RNG_OUTPUT_1_REG,
+ RNG_OUTPUT_2_REG,
+ RNG_OUTPUT_3_REG,
RNG_STATUS_REG,
RNG_INTMASK_REG,
RNG_INTACK_REG,
@@ -82,7 +86,7 @@ enum {
};
static const u16 reg_map_omap2[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
+ [RNG_OUTPUT_0_REG] = 0x0,
[RNG_STATUS_REG] = 0x4,
[RNG_CONFIG_REG] = 0x28,
[RNG_REV_REG] = 0x3c,
@@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = {
};
static const u16 reg_map_omap4[] = {
- [RNG_OUTPUT_L_REG] = 0x0,
- [RNG_OUTPUT_H_REG] = 0x4,
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
[RNG_STATUS_REG] = 0x8,
[RNG_INTMASK_REG] = 0xc,
[RNG_INTACK_REG] = 0x10,
@@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = {
[RNG_SYSCONFIG_REG] = 0x1FE4,
};
+static const u16 reg_map_eip76[] = {
+ [RNG_OUTPUT_0_REG] = 0x0,
+ [RNG_OUTPUT_1_REG] = 0x4,
+ [RNG_OUTPUT_2_REG] = 0x8,
+ [RNG_OUTPUT_3_REG] = 0xc,
+ [RNG_STATUS_REG] = 0x10,
+ [RNG_INTACK_REG] = 0x10,
+ [RNG_CONTROL_REG] = 0x14,
+ [RNG_CONFIG_REG] = 0x18,
+ [RNG_ALARMCNT_REG] = 0x1c,
+ [RNG_FROENABLE_REG] = 0x20,
+ [RNG_FRODETUNE_REG] = 0x24,
+ [RNG_ALARMMASK_REG] = 0x28,
+ [RNG_ALARMSTOP_REG] = 0x2c,
+ [RNG_REV_REG] = 0x7c,
+};
+
struct omap_rng_dev;
/**
* struct omap_rng_pdata - RNG IP block-specific data
@@ -127,6 +148,8 @@ struct omap_rng_dev {
void __iomem *base;
struct device *dev;
const struct omap_rng_pdata *pdata;
+ struct hwrng rng;
+ struct clk *clk;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
__raw_writel(val, priv->base + priv->pdata->regs[reg]);
}
-static int omap_rng_data_present(struct hwrng *rng, int wait)
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
{
struct omap_rng_dev *priv;
- int data, i;
+ int i, present;
priv = (struct omap_rng_dev *)rng->priv;
+ if (max < priv->pdata->data_size)
+ return 0;
+
for (i = 0; i < 20; i++) {
- data = priv->pdata->data_present(priv);
- if (data || !wait)
+ present = priv->pdata->data_present(priv);
+ if (present || !wait)
break;
- /* RNG produces data fast enough (2+ MBit/sec, even
- * during "rngtest" loads, that these delays don't
- * seem to trigger. We *could* use the RNG IRQ, but
- * that'd be higher overhead ... so why bother?
- */
+
udelay(10);
}
- return data;
-}
-
-static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct omap_rng_dev *priv;
- u32 data_size, i;
-
- priv = (struct omap_rng_dev *)rng->priv;
- data_size = priv->pdata->data_size;
+ if (!present)
+ return 0;
- for (i = 0; i < data_size / sizeof(u32); i++)
- data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+ memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+ priv->pdata->data_size);
if (priv->pdata->regs[RNG_INTACK_REG])
omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
- return data_size;
+
+ return priv->pdata->data_size;
}
static int omap_rng_init(struct hwrng *rng)
@@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng)
priv->pdata->cleanup(priv);
}
-static struct hwrng omap_rng_ops = {
- .name = "omap",
- .data_present = omap_rng_data_present,
- .data_read = omap_rng_data_read,
- .init = omap_rng_init,
- .cleanup = omap_rng_cleanup,
-};
static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
{
@@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+ u32 val;
+
+ /* Return if RNG is already running. */
+ if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+ return 0;
+
+ /* Number of 512 bit blocks of raw Noise Source output data that must
+ * be processed by either the Conditioning Function or the
+ * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’
+ * output value.
+ */
+ val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+ /* Number of FRO samples that are XOR-ed together into one bit to be
+ * shifted into the main shift register
+ */
+ val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+ omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+ /* Enable all available FROs */
+ omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+ omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+ /* Enable TRNG */
+ val = RNG_CONTROL_ENABLE_TRNG_MASK;
+ omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+ return 0;
+}
+
static int omap4_rng_init(struct omap_rng_dev *priv)
{
u32 val;
@@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
+static struct omap_rng_pdata eip76_rng_pdata = {
+ .regs = (u16 *)reg_map_eip76,
+ .data_size = EIP76_RNG_OUTPUT_SIZE,
+ .data_present = omap4_rng_data_present,
+ .init = eip76_rng_init,
+ .cleanup = omap4_rng_cleanup,
+};
+
static const struct of_device_id omap_rng_of_match[] = {
{
.compatible = "ti,omap2-rng",
@@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = {
.compatible = "ti,omap4-rng",
.data = &omap4_rng_pdata,
},
+ {
+ .compatible = "inside-secure,safexcel-eip76",
+ .data = &eip76_rng_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rng_of_match);
@@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
priv->pdata = match->data;
- if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+ if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+ of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "%s: error getting IRQ resource - %d\n",
@@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
return err;
}
omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+ priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ dev_err(&pdev->dev, "unable to enable the clk, "
+ "err = %d\n", err);
+ }
}
return 0;
}
@@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- omap_rng_ops.priv = (unsigned long)priv;
+ priv->rng.read = omap_rng_do_read;
+ priv->rng.init = omap_rng_init;
+ priv->rng.cleanup = omap_rng_cleanup;
+
+ priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
@@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->rng.name) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev)
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
- goto err_ioremap;
+ goto err_register;
- ret = hwrng_register(&omap_rng_ops);
+ ret = hwrng_register(&priv->rng);
if (ret)
goto err_register;
- dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+ dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
omap_rng_read(priv, RNG_REV_REG));
return 0;
err_register:
priv->base = NULL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&omap_rng_ops);
+ hwrng_unregister(&priv->rng);
priv->pdata->cleanup(priv);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 11dc9b7c09ce..9b5e68a71d01 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
u32 t;
unsigned int timeout = RNG_TIMEOUT;
- if (max < 8)
- return 0;
-
do {
t = readl(priv->base + RNGRCNT) & RCNT_MASK;
if (t == 64) {
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 63ce51d09af1..d9f46b437cc2 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -28,7 +28,6 @@
static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
u64 buffer[PLPAR_HCALL_BUFSIZE];
- size_t size = max < 8 ? max : 8;
int rc;
rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
@@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
- memcpy(data, buffer, size);
+ memcpy(data, buffer, 8);
/* The hypervisor interface returns 64 bits */
- return size;
+ return 8;
}
/**
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1786574536b2..a21407de46ae 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -989,4 +989,3 @@ module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
-MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886819f5..92e53acf2cd2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -158,15 +158,16 @@ struct seq_table {
* Store the information in a msgid (long) to allow us to find a
* sequence table entry from the msgid.
*/
-#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+ ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
do { \
- seq = ((msgid >> 26) & 0x3f); \
- seqid = (msgid & 0x3fffff); \
+ seq = (((msgid) >> 26) & 0x3f); \
+ seqid = ((msgid) & 0x3ffffff); \
} while (0)
-#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
struct ipmi_channel {
unsigned char medium;
@@ -4645,3 +4646,4 @@ MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
" interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c0146012..2a7c425ddfa7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -789,7 +789,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -812,7 +812,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
break;
}
- start_getting_msg_queue(smi_info);
+ start_getting_events(smi_info);
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -1764,7 +1764,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
s = strchr(*curr, ',');
if (!s) {
- printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+ pr_warn(PFX "No hotmod %s given.\n", name);
return -EINVAL;
}
*s = '\0';
@@ -1777,7 +1777,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
}
}
- printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
return -EINVAL;
}
@@ -1788,16 +1788,12 @@ static int check_hotmod_int_op(const char *curr, const char *option,
if (strcmp(curr, name) == 0) {
if (!option) {
- printk(KERN_WARNING PFX
- "No option given for '%s'\n",
- curr);
+ pr_warn(PFX "No option given for '%s'\n", curr);
return -EINVAL;
}
*val = simple_strtoul(option, &n, 0);
if ((*n != '\0') || (*option == '\0')) {
- printk(KERN_WARNING PFX
- "Bad option given for '%s'\n",
- curr);
+ pr_warn(PFX "Bad option given for '%s'\n", curr);
return -EINVAL;
}
return 1;
@@ -1877,8 +1873,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
addr = simple_strtoul(curr, &n, 0);
if ((*n != '\0') || (*curr == '\0')) {
- printk(KERN_WARNING PFX "Invalid hotmod address"
- " '%s'\n", curr);
+ pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
break;
}
@@ -1921,9 +1916,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
continue;
rv = -EINVAL;
- printk(KERN_WARNING PFX
- "Invalid hotmod option '%s'\n",
- curr);
+ pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
goto out;
}
@@ -2003,7 +1996,7 @@ static int hardcode_find_bmc(void)
return -ENOMEM;
info->addr_source = SI_HARDCODED;
- printk(KERN_INFO PFX "probing via hardcoded address\n");
+ pr_info(PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -2012,9 +2005,8 @@ static int hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, was invalid: %s\n",
- i, si_type[i]);
+ pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type[i]);
kfree(info);
continue;
}
@@ -2030,9 +2022,8 @@ static int hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING PFX "Interface type specified "
- "for interface %d, but port and address were "
- "not set or set to zero.\n", i);
+ pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ i);
kfree(info);
continue;
}
@@ -2173,18 +2164,18 @@ static int try_init_spmi(struct SPMITable *spmi)
int rv;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
}
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
+ pr_err(PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
info->addr_source = SI_SPMI;
- printk(KERN_INFO PFX "probing via SPMI\n");
+ pr_info(PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2201,8 +2192,8 @@ static int try_init_spmi(struct SPMITable *spmi)
kfree(info);
return -EIO;
default:
- printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2238,15 +2229,15 @@ static int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
+ pr_warn(PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
rv = add_smi(info);
if (rv)
@@ -2356,12 +2347,12 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR PFX "Could not allocate SI data\n");
+ pr_err(PFX "Could not allocate SI data\n");
return;
}
info->addr_source = SI_SMBIOS;
- printk(KERN_INFO PFX "probing via SMBIOS\n");
+ pr_info(PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2391,8 +2382,8 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
- ipmi_data->addr_space);
+ pr_warn(PFX "Unknown SMBIOS I/O Address type: %d\n",
+ ipmi_data->addr_space);
return;
}
info->io.addr_data = ipmi_data->base_addr;
@@ -2410,9 +2401,9 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info->irq_setup = std_irq_setup;
pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
if (add_smi(info))
kfree(info);
@@ -3141,9 +3132,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from get"
- " global enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3154,8 +3143,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING PFX "Invalid return from get global"
- " enables command, cannot enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3173,9 +3161,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING PFX "Error getting response from set"
- " global, enables command, the event buffer is not"
- " enabled.\n");
+ pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
goto out;
}
@@ -3185,8 +3171,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING PFX "Invalid return from get global,"
- "enables command, not enable the event buffer.\n");
+ pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -3463,8 +3448,16 @@ static int is_new_interface(struct smi_info *info)
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
- if (e->io.addr_data == info->io.addr_data)
+ if (e->io.addr_data == info->io.addr_data) {
+ /*
+ * This is a cheap hack, ACPI doesn't have a defined
+ * slave address but SMBIOS does. Pick it up from
+ * any source that has it available.
+ */
+ if (info->slave_addr && !e->slave_addr)
+ e->slave_addr = info->slave_addr;
return 0;
+ }
}
return 1;
@@ -3474,17 +3467,18 @@ static int add_smi(struct smi_info *new_smi)
{
int rv = 0;
- printk(KERN_INFO PFX "Adding %s-specified %s state machine",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_CONT " duplicate interface\n");
+ pr_info(PFX "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
rv = -EBUSY;
goto out_err;
}
- printk(KERN_CONT "\n");
+ pr_info(PFX "Adding %s-specified %s state machine\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
@@ -3502,15 +3496,14 @@ static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
+ char *init_name = NULL;
- printk(KERN_INFO PFX "Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
+ pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
switch (new_smi->si_type) {
case SI_KCS:
@@ -3531,11 +3524,30 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
+ /* Do this early so it's available for logs. */
+ if (!new_smi->dev) {
+ init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", 0);
+
+ /*
+ * If we don't already have a device from something
+ * else (like PCI), then register a new one.
+ */
+ new_smi->pdev = platform_device_alloc("ipmi_si",
+ new_smi->intf_num);
+ if (!new_smi->pdev) {
+ pr_err(PFX "Unable to allocate platform device\n");
+ goto out_err;
+ }
+ new_smi->dev = &new_smi->pdev->dev;
+ new_smi->dev->driver = &ipmi_driver.driver;
+ /* Nulled by device_add() */
+ new_smi->dev->init_name = init_name;
+ }
+
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR PFX
- "Could not allocate state machine memory\n");
+ pr_err(PFX "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3545,14 +3557,14 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR PFX "Could not set up I/O space\n");
+ dev_err(new_smi->dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "Interface detection failed\n");
+ dev_err(new_smi->dev, "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3564,8 +3576,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO PFX "There appears to be no BMC"
- " at this location\n");
+ dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
goto out_err;
}
@@ -3604,27 +3615,12 @@ static int try_smi_init(struct smi_info *new_smi)
atomic_set(&new_smi->req_events, 1);
}
- if (!new_smi->dev) {
- /*
- * If we don't already have a device from something
- * else (like PCI), then register a new one.
- */
- new_smi->pdev = platform_device_alloc("ipmi_si",
- new_smi->intf_num);
- if (!new_smi->pdev) {
- printk(KERN_ERR PFX
- "Unable to allocate platform device\n");
- goto out_err;
- }
- new_smi->dev = &new_smi->pdev->dev;
- new_smi->dev->driver = &ipmi_driver.driver;
-
+ if (new_smi->pdev) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR PFX
- "Unable to register system interface device:"
- " %d\n",
- rv);
+ dev_err(new_smi->dev,
+ "Unable to register system interface device: %d\n",
+ rv);
goto out_err;
}
new_smi->dev_registered = true;
@@ -3668,6 +3664,9 @@ static int try_smi_init(struct smi_info *new_smi)
dev_info(new_smi->dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->si_type]);
+ WARN_ON(new_smi->dev->init_name != NULL);
+ kfree(init_name);
+
return 0;
out_err_stop_timer:
@@ -3712,8 +3711,14 @@ out_err:
if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
new_smi->dev_registered = false;
+ new_smi->pdev = NULL;
+ } else if (new_smi->pdev) {
+ platform_device_put(new_smi->pdev);
+ new_smi->pdev = NULL;
}
+ kfree(init_name);
+
return rv;
}
@@ -3732,8 +3737,7 @@ static int init_ipmi_si(void)
if (si_tryplatform) {
rv = platform_driver_register(&ipmi_driver);
if (rv) {
- printk(KERN_ERR PFX "Unable to register "
- "driver: %d\n", rv);
+ pr_err(PFX "Unable to register driver: %d\n", rv);
return rv;
}
}
@@ -3753,7 +3757,7 @@ static int init_ipmi_si(void)
}
}
- printk(KERN_INFO "IPMI System Interface driver.\n");
+ pr_info("IPMI System Interface driver.\n");
/* If the user gave us a device, they presumably want us to use it */
if (!hardcode_find_bmc())
@@ -3763,8 +3767,7 @@ static int init_ipmi_si(void)
if (si_trypci) {
rv = pci_register_driver(&ipmi_pci_driver);
if (rv)
- printk(KERN_ERR PFX "Unable to register "
- "PCI driver: %d\n", rv);
+ pr_err(PFX "Unable to register PCI driver: %d\n", rv);
else
pci_registered = true;
}
@@ -3826,8 +3829,7 @@ static int init_ipmi_si(void)
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
- printk(KERN_WARNING PFX
- "Unable to find any System Interface(s)\n");
+ pr_warn(PFX "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
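
The ipmi_si hunks above fold multi-line printk(KERN_* PFX ...) calls into single pr_warn()/pr_info()/pr_err() calls with unsplit format strings. A minimal sketch of that logging pattern, assuming PFX expands to the driver's usual "ipmi_si: " prefix (its definition sits outside this diff):

#include <linux/kernel.h>	/* pr_info(), pr_err() */

#define PFX "ipmi_si: "		/* assumed prefix; not shown in this hunk */

static void report_driver_register(int rv)
{
	if (rv)
		pr_err(PFX "Unable to register driver: %d\n", rv);
	else
		pr_info(PFX "IPMI System Interface driver.\n");
}
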
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673ffff00be..cca6e5bc1cea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -174,7 +174,6 @@ enum ssif_stat_indexes {
};
struct ssif_addr_info {
- unsigned short addr;
struct i2c_board_info binfo;
char *adapter_name;
int debug;
@@ -1154,10 +1153,6 @@ static bool ssif_dbg_probe;
module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
-static int use_thread;
-module_param(use_thread, int, 0);
-MODULE_PARM_DESC(use_thread, "Use the thread interface.");
-
static bool ssif_tryacpi = true;
module_param_named(tryacpi, ssif_tryacpi, bool, 0);
MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
@@ -1405,6 +1400,34 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
return false;
}
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+ struct ssif_addr_info *info;
+
+ if (slave_addr)
+ return slave_addr;
+
+ /*
+ * Came in without a slave address, search around to see if
+ * the other sources have a slave address. This lets us pick
+ * up an SMBIOS slave address when using ACPI.
+ */
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr != client->addr)
+ continue;
+ if (info->adapter_name && client->adapter->name &&
+ strcmp_nospace(info->adapter_name,
+ client->adapter->name))
+ continue;
+ if (info->slave_addr) {
+ slave_addr = info->slave_addr;
+ break;
+ }
+ }
+
+ return slave_addr;
+}
+
/*
* Global enables we care about.
*/
@@ -1447,6 +1470,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ slave_addr = find_slave_address(client, slave_addr);
+
pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
@@ -1935,7 +1960,7 @@ static int decode_dmi(const struct dmi_device *dmi_dev)
slave_addr = data[6];
}
- return new_ssif_client(myaddr, NULL, 0, 0, SI_SMBIOS);
+ return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
}
static void dmi_iterator(void)
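
find_slave_address() above walks the driver's ssif_infos list to borrow an SMBIOS-provided slave address when the ACPI path did not supply one. A stripped-down sketch of that lookup shape, using illustrative structure and list names rather than the driver's own:

#include <linux/list.h>

struct addr_source {			/* illustrative, not the driver's type */
	struct list_head link;
	unsigned short addr;		/* I2C address of the interface */
	unsigned short slave_addr;	/* BMC slave address, 0 if unknown */
};

static LIST_HEAD(addr_sources);

static int pick_slave_addr(unsigned short i2c_addr, int slave_addr)
{
	struct addr_source *src;

	if (slave_addr)			/* caller already knows it */
		return slave_addr;

	list_for_each_entry(src, &addr_sources, link) {
		if (src->addr != i2c_addr)
			continue;
		if (src->slave_addr)
			return src->slave_addr;
	}
	return 0;			/* fall back to the default elsewhere */
}
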
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 4facc7517a6a..4035495f3a86 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -43,7 +43,7 @@
#include <linux/kdebug.h>
#include <linux/rwsem.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/notifier.h>
#include <linux/nmi.h>
#include <linux/reboot.h>
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c4094c4e22c1..5b6742770656 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -134,7 +134,7 @@
#include <linux/lp.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* if you have more than 8 printers, remember to increase LP_NO */
#define LP_NO 8
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 67d426470e53..8c9216a0f62e 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5bb1985ec484..6d9cc2d39d22 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
read = 0;
if (p < (unsigned long) high_memory) {
low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
* by the kernel or data corruption may occur
*/
kbuf = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(kbuf))
+ return -ENXIO;
if (copy_to_user(buf, kbuf, sz))
return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
* corruption may occur.
*/
ptr = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(ptr))
+ return -ENXIO;
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count,
(unsigned long)high_memory - p);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 3d6c0671e996..f786b18ac500 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -35,7 +35,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/shub_mmr.h>
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f3f92d5fcda0..a697ca0cab1e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* be because another thread has installed the pte first, so it
* is no problem.
*/
- vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ vm_insert_pfn(vma, vmf->address, pfn);
return VM_FAULT_NOPAGE;
}
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 972c40a19629..4a8937f80570 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -54,7 +54,7 @@
#include <linux/sched.h> /* cond_resched() */
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#include "smapi.h"
#include "mwavedd.h"
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 37e0a4926080..21cb09c7bed7 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -53,7 +53,7 @@
#include "smapi.h"
#include "mwavepub.h"
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
extern int mwave_debug;
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
index b07b119ae57f..2a91bf048804 100644
--- a/drivers/char/nsc_gpio.c
+++ b/drivers/char/nsc_gpio.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#define NAME "nsc_gpio"
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 0e184426db98..a5b1eb276c0b 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -16,7 +16,7 @@
#include <linux/errno.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index dbe598de9b74..a284ae25e69a 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -31,7 +31,7 @@
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
#include <asm/mach-types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*****************************************************************************/
#include <asm/nwflash.h>
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 3f79a9fb6b1b..5f4be88c0dfc 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -20,7 +20,7 @@
#include <linux/mutex.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DEVNAME "pc8736x_gpio"
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7c2bd1..d7123259143e 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -26,7 +26,7 @@
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index a7dd5f4f2c5a..d136db1a10f0 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -84,7 +84,7 @@
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 02819e0703c8..87885d146dbb 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
struct pardevice *pdev = NULL;
char *name;
struct pardev_cb ppdev_cb;
+ int rc = 0;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
pr_warn("%s: no associated port!\n", name);
- kfree(name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
- kfree(name);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
pp->pdev = pdev;
dev_dbg(&pdev->dev, "registered pardevice\n");
- return 0;
+err:
+ kfree(name);
+ return rc;
}
static enum ieee1284_phase init_phase(int mode)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d6876d506220..1ef26403bcc8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,7 +265,7 @@
#include <crypto/chacha20.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index e83b2adc014a..293167c6e254 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -24,7 +24,7 @@
#include <linux/compat.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
struct raw_device_data {
struct block_device *binding;
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 0bc135b9b16f..903761bc41c9 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/types.h>
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 719c5b4eed39..4fa7fcd8af36 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -52,7 +52,7 @@
#include <linux/platform_device.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/sonypi.h>
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 100cd1de9939..572a51704e67 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -44,7 +44,7 @@
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <asm/io.h> /* inb/outb */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index f5a45d887a37..5488516da8ea 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -63,7 +63,7 @@
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9faa0b1e7766..277186d3b668 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -32,7 +32,7 @@ config TCG_TIS_CORE
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
- depends on X86
+ depends on X86 || OF
select TCG_TIS_CORE
---help---
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a385fb8c17de..a05b1ebd0b26 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,16 +2,10 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o
-
-ifdef CONFIG_ACPI
- tpm-y += tpm_eventlog.o tpm_acpi.o
-else
-ifdef CONFIG_TCG_IBMVTPM
- tpm-y += tpm_eventlog.o tpm_of.o
-endif
-endif
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+ tpm_eventlog.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
+tpm-$(CONFIG_OF) += tpm_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e5950131bd90..a77262d31911 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(tpm_put_ops);
*
* The return'd chip has been tpm_try_get_ops'd and must be released via
* tpm_put_ops
- */
+ */
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *chip, *res = NULL;
@@ -103,7 +103,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ chip = idr_find(&dev_nums_idr, chip_num);
if (chip && !tpm_try_get_ops(chip))
res = chip;
}
@@ -127,6 +127,7 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
+ kfree(chip->log.bios_event_log);
kfree(chip);
}
@@ -276,27 +277,6 @@ static void tpm_del_char_device(struct tpm_chip *chip)
up_write(&chip->ops_sem);
}
-static int tpm1_chip_register(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return 0;
-
- tpm_sysfs_add_device(chip);
-
- chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
-
- return 0;
-}
-
-static void tpm1_chip_unregister(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
-}
-
static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
@@ -363,20 +343,20 @@ int tpm_chip_register(struct tpm_chip *chip)
return rc;
}
- rc = tpm1_chip_register(chip);
- if (rc)
+ tpm_sysfs_add_device(chip);
+
+ rc = tpm_bios_log_setup(chip);
+ if (rc != 0 && rc != -ENODEV)
return rc;
tpm_add_ppi(chip);
rc = tpm_add_char_device(chip);
if (rc) {
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
return rc;
}
- chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
tpm_chip_unregister(chip);
@@ -402,12 +382,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
- if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
- return;
-
tpm_del_legacy_sysfs(chip);
-
- tpm1_chip_unregister(chip);
+ tpm_bios_log_teardown(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149cf0110..a2688ac2b48f 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#include "tpm_eventlog.h"
@@ -356,6 +357,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_lock(&chip->tpm_mutex);
+ if (chip->dev.parent)
+ pm_runtime_get_sync(chip->dev.parent);
+
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(&chip->dev,
@@ -397,6 +401,9 @@ out_recv:
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
+ if (chip->dev.parent)
+ pm_runtime_put_sync(chip->dev.parent);
+
if (!(flags & TPM_TRANSMIT_UNLOCKED))
mutex_unlock(&chip->tpm_mutex);
return rc;
@@ -437,26 +444,29 @@ static const struct tpm_input_header tpm_getcap_header = {
.ordinal = TPM_ORD_GET_CAP
};
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
tpm_cmd.header.in = tpm_getcap_header;
- if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
- tpm_cmd.params.getcap_in.cap = subcap_id;
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
- tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_FLAG);
else
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.cap =
+ cpu_to_be32(TPM_CAP_PROP);
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = subcap_id;
+ tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
}
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
desc);
@@ -488,12 +498,14 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
int tpm_get_timeouts(struct tpm_chip *chip)
{
- struct tpm_cmd_t tpm_cmd;
+ cap_t cap;
unsigned long new_timeout[4];
unsigned long old_timeout[4];
- struct duration_t *duration_cap;
ssize_t rc;
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
@@ -506,46 +518,30 @@ int tpm_get_timeouts(struct tpm_chip *chip)
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
chip->duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- NULL);
-
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(&chip->dev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- 0, NULL);
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts");
}
- if (rc) {
- dev_err(&chip->dev,
- "A TPM error (%zd) occurred attempting to determine the timeouts\n",
- rc);
- goto duration;
- }
-
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return -EINVAL;
+ if (rc)
+ return rc;
- old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
- old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
- old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
- old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+ old_timeout[0] = be32_to_cpu(cap.timeout.a);
+ old_timeout[1] = be32_to_cpu(cap.timeout.b);
+ old_timeout[2] = be32_to_cpu(cap.timeout.c);
+ old_timeout[3] = be32_to_cpu(cap.timeout.d);
memcpy(new_timeout, old_timeout, sizeof(new_timeout));
/*
@@ -583,29 +579,17 @@ int tpm_get_timeouts(struct tpm_chip *chip)
chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
-duration:
- tpm_cmd.header.in = tpm_getcap_header;
- tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
- tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
- tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
-
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
- "attempting to determine the durations");
+ rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations");
if (rc)
return rc;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return -EINVAL;
-
- duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->duration[TPM_SHORT] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
chip->duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
chip->duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
@@ -619,6 +603,8 @@ duration:
chip->duration_adjusted = true;
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -726,6 +712,14 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static const struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
+};
+
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or ANY
@@ -736,14 +730,6 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* isn't, protect against the chip disappearing, by incrementing
* the module usage count.
*/
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_RESULT_SIZE 34
-static const struct tpm_input_header pcrextend_header = {
- .tag = TPM_TAG_RQU_COMMAND,
- .length = cpu_to_be32(34),
- .ordinal = TPM_ORD_PCR_EXTEND
-};
-
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
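
After this change tpm_getcap() takes a host-order capability ID and performs the cpu_to_be32() conversion itself, which is what lets tpm_get_timeouts() above shrink to two calls. A hedged sketch of a caller, reusing only identifiers visible in this diff and assuming the driver's "tpm.h" for cap_t and tpm_getcap():

static ssize_t read_tis_timeout_a(struct tpm_chip *chip,
				  unsigned long *jiffies_out)
{
	cap_t cap;
	ssize_t rc;

	/* the subcap ID is now passed in CPU byte order */
	rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
			"attempting to determine the timeouts");
	if (rc)
		return rc;

	/* the returned payload is still big-endian on the wire */
	*jiffies_out = usecs_to_jiffies(be32_to_cpu(cap.timeout.a));
	return 0;
}
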
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4af9fb2..848ad6580b46 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -193,7 +193,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
be32_to_cpu(cap.manufacturer_id));
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (!rc) {
str += sprintf(str,
@@ -204,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
@@ -284,6 +284,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c97f6a6..1ae976894257 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -35,11 +35,14 @@
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include "tpm_eventlog.h"
+
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
+ TPM_NUM_EVENT_LOG_FILES = 3,
};
enum tpm_timeout {
@@ -139,10 +142,15 @@ enum tpm2_startup_types {
#define TPM_PPI_VERSION_LEN 3
enum tpm_chip_flags {
- TPM_CHIP_FLAG_REGISTERED = BIT(0),
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+};
+
+struct tpm_chip_seqops {
+ struct tpm_chip *chip;
+ const struct seq_operations *seqops;
};
struct tpm_chip {
@@ -156,6 +164,10 @@ struct tpm_chip {
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+ struct tpm_bios_log log;
+ struct tpm_chip_seqops bin_log_seqops;
+ struct tpm_chip_seqops ascii_log_seqops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -171,7 +183,7 @@ struct tpm_chip {
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- struct dentry **bios_dir;
+ struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
const struct attribute_group *groups[3];
unsigned int groups_cnt;
@@ -282,21 +294,20 @@ typedef union {
} cap_t;
enum tpm_capabilities {
- TPM_CAP_FLAG = cpu_to_be32(4),
- TPM_CAP_PROP = cpu_to_be32(5),
- CAP_VERSION_1_1 = cpu_to_be32(0x06),
- CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
};
enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
- TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
- TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
- TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
- TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
- TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
- TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
};
struct tpm_getcap_params_in {
@@ -484,7 +495,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
unsigned int flags);
ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
unsigned int flags, const char *desc);
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm1_auto_startup(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58c939..da5b782a9731 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -680,7 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
* @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
* @options: authentication values and other options
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 565a9478cb94..b7718c95fd0b 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -6,10 +6,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog extended by the TCG BIOS of PC platform
+ * Access to the event log extended by the TCG BIOS of PC platform
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,29 +46,28 @@ struct acpi_tcpa {
};
/* read binary bios log */
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_acpi(struct tpm_chip *chip)
{
struct acpi_tcpa *buff;
acpi_status status;
void __iomem *virt;
u64 len, start;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- printk(KERN_ERR
- "%s: ERROR - Eventlog already initialized\n",
- __func__);
- return -EFAULT;
- }
+ log = &chip->log;
+
+ /* Unfortunately ACPI does not associate the event log with a specific
+ * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
status = acpi_get_table(ACPI_SIG_TCPA, 1,
(struct acpi_table_header **)&buff);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
- __func__);
- return -EIO;
- }
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
switch(buff->platform_class) {
case BIOS_SERVER:
@@ -81,29 +81,29 @@ int read_log(struct tpm_bios_log *log)
break;
}
if (!len) {
- printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
}
/* malloc EventLog space */
log->bios_event_log = kmalloc(len, GFP_KERNEL);
- if (!log->bios_event_log) {
- printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
- __func__);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + len;
virt = acpi_os_map_iomem(start, len);
- if (!virt) {
- kfree(log->bios_event_log);
- printk("%s: ERROR - Unable to map memory\n", __func__);
- return -EIO;
- }
+ if (!virt)
+ goto err;
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
return 0;
+
+err:
+ kfree(log->bios_event_log);
+ log->bios_event_log = NULL;
+ return -EIO;
+
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870af916c..717b6b47c042 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -83,7 +84,71 @@ struct crb_priv {
u32 cmd_size;
};
-static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
+/**
+ * crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * Anyhow, we do not wait here as a subsequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with ACPI-start method.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->cca->req);
+ /* we don't really care when this settles */
+
+ return 0;
+}
+
+/**
+ * crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll until the device acknowledges it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __maybe_unused crb_cmd_ready(struct device *dev,
+ struct crb_priv *priv)
+{
+ ktime_t stop, start;
+
+ if (priv->flags & CRB_FL_ACPI_START)
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->cca->req);
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(TPM2_TIMEOUT_C));
+ do {
+ if (!(ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY))
+ return 0;
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ if (ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
static u8 crb_status(struct tpm_chip *chip)
{
@@ -196,21 +261,6 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_DRV_STS_COMPLETE,
};
-static int crb_init(struct acpi_device *device, struct crb_priv *priv)
-{
- struct tpm_chip *chip;
-
- chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- dev_set_drvdata(&chip->dev, priv);
- chip->acpi_dev_handle = device->handle;
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- return tpm_chip_register(chip);
-}
-
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
struct resource *io_res = data;
@@ -249,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct list_head resources;
struct resource io_res;
struct device *dev = &device->dev;
+ u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
u64 rsp_pa;
@@ -276,12 +327,27 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (IS_ERR(priv->cca))
return PTR_ERR(priv->cca);
- cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
- (u64) ioread32(&priv->cca->cmd_pa_low);
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+ */
+ ret = crb_cmd_ready(dev, priv);
+ if (ret)
+ return ret;
+
+ pa_high = ioread32(&priv->cca->cmd_pa_high);
+ pa_low = ioread32(&priv->cca->cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
cmd_size = ioread32(&priv->cca->cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
- if (IS_ERR(priv->cmd))
- return PTR_ERR(priv->cmd);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
rsp_pa = le64_to_cpu(rsp_pa);
@@ -289,7 +355,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (cmd_pa != rsp_pa) {
priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
- return PTR_ERR_OR_ZERO(priv->rsp);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
}
/* According to the PTP specification, overlapping command and response
@@ -297,18 +364,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if (cmd_size != rsp_size) {
dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+
priv->cmd_size = cmd_size;
priv->rsp = priv->cmd;
- return 0;
+
+out:
+ crb_go_idle(dev, priv);
+
+ return ret;
}
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
+ struct tpm_chip *chip;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
@@ -346,7 +420,33 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
return rc;
- return crb_init(device, priv);
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = crb_cmd_ready(dev, priv);
+ if (rc)
+ return rc;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ rc = tpm_chip_register(chip);
+ if (rc) {
+ crb_go_idle(dev, priv);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return rc;
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -356,9 +456,34 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
+ pm_runtime_disable(dev);
+
return 0;
}
+#ifdef CONFIG_PM
+static int crb_pm_runtime_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_go_idle(dev, priv);
+}
+
+static int crb_pm_runtime_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return crb_cmd_ready(dev, priv);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+ SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+};
+
static struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index e7228863290e..11bb1138a828 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -7,10 +7,11 @@
* Stefan Berger <stefanb@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
- * Access to the eventlog created by a system's firmware / BIOS
+ * Access to the event log created by a system's firmware / BIOS
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -72,7 +73,8 @@ static const char* tcpa_pc_event_id_strings[] = {
static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *addr = log->bios_event_log;
void *limit = log->bios_event_log_end;
struct tcpa_event *event;
@@ -119,7 +121,8 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
- struct tpm_bios_log *log = m->private;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
void *limit = log->bios_event_log_end;
u32 converted_event_size;
u32 converted_event_type;
@@ -260,13 +263,10 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
static int tpm_bios_measurements_release(struct inode *inode,
struct file *file)
{
- struct seq_file *seq = file->private_data;
- struct tpm_bios_log *log = seq->private;
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
- if (log) {
- kfree(log->bios_event_log);
- kfree(log);
- }
+ put_device(&chip->dev);
return seq_release(inode, file);
}
@@ -304,151 +304,159 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_binary_bios_measurements_show,
};
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
+static int tpm_bios_measurements_open(struct inode *inode,
struct file *file)
{
int err;
- struct tpm_bios_log *log;
struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
-
- if ((err = read_log(log)))
- goto out_free;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
/* now register seq file */
- err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ err = seq_open(file, seqops);
if (!err) {
seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ seq->private = chip;
}
-out:
return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
}
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
- .open = tpm_ascii_bios_measurements_open,
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tpm_bios_measurements_release,
};
-static int tpm_binary_bios_measurements_open(struct inode *inode,
- struct file *file)
+static int tpm_read_log(struct tpm_chip *chip)
{
- int err;
- struct tpm_bios_log *log;
- struct seq_file *seq;
-
- log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
+ int rc;
- if ((err = read_log(log)))
- goto out_free;
-
- /* now register seq file */
- err = seq_open(file, &tpm_binary_b_measurments_seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = log;
- } else {
- goto out_free;
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
}
-out:
- return err;
-out_free:
- kfree(log->bios_event_log);
- kfree(log);
- goto out;
-}
-
-static const struct file_operations tpm_binary_bios_measurements_ops = {
- .open = tpm_binary_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
-};
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
-static int is_bad(void *p)
-{
- if (!p)
- return 1;
- if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
- return 1;
- return 0;
+ return tpm_read_log_of(chip);
}
-struct dentry **tpm_bios_log_setup(const char *name)
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
{
- struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
-
- tpm_dir = securityfs_create_dir(name, NULL);
- if (is_bad(tpm_dir))
- goto out;
-
- bin_file =
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_read_log(chip);
+ if (rc)
+ return rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
securityfs_create_file("binary_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_binary_bios_measurements_ops);
- if (is_bad(bin_file))
- goto out_tpm;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
- ascii_file =
+ chip->bios_dir[cnt] =
securityfs_create_file("ascii_bios_measurements",
- S_IRUSR | S_IRGRP, tpm_dir, NULL,
- &tpm_ascii_bios_measurements_ops);
- if (is_bad(ascii_file))
- goto out_bin;
-
- ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
- if (!ret)
- goto out_ascii;
-
- ret[0] = ascii_file;
- ret[1] = bin_file;
- ret[2] = tpm_dir;
-
- return ret;
-
-out_ascii:
- securityfs_remove(ascii_file);
-out_bin:
- securityfs_remove(bin_file);
-out_tpm:
- securityfs_remove(tpm_dir);
-out:
- return NULL;
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
}
-void tpm_bios_log_teardown(struct dentry **lst)
+void tpm_bios_log_teardown(struct tpm_chip *chip)
{
int i;
-
- for (i = 0; i < 3; i++)
- securityfs_remove(lst[i]);
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and to check it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
}
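
The teardown loop above relies on a small handshake: teardown clears inode->i_private under inode_lock(), and tpm_bios_measurements_open() re-checks it under the same lock before taking a device reference. A generic sketch of that open-side check, assuming a securityfs file whose i_private points at a struct tpm_chip_seqops as defined in this patch:

#include <linux/fs.h>

static int guarded_open(struct inode *inode, struct file *file)
{
	struct tpm_chip_seqops *priv;

	inode_lock(inode);
	priv = inode->i_private;
	if (!priv) {			/* teardown already ran */
		inode_unlock(inode);
		return -ENODEV;
	}
	get_device(&priv->chip->dev);	/* ref taken while i_private is valid */
	inode_unlock(inode);

	file->private_data = priv;
	return 0;
}
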
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 8de62b09be51..1660d74ea79a 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -73,20 +73,24 @@ enum tcpa_pc_event_ids {
HOST_TABLE_OF_DEVICES,
};
-int read_log(struct tpm_bios_log *log);
-
-#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
- defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(const char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
#else
-static inline struct dentry **tpm_bios_log_setup(const char *name)
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
{
- return NULL;
+ return -ENODEV;
}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
{
+ return -ENODEV;
}
#endif
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+
#endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 570f30c5c5f4..7dee42d7b5e0 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -2,6 +2,7 @@
* Copyright 2012 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
@@ -20,55 +21,38 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
const u32 *sizep;
const u64 *basep;
+ struct tpm_bios_log *log;
- if (log->bios_event_log != NULL) {
- pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
- return -EFAULT;
- }
-
- np = of_find_node_by_name(NULL, "vtpm");
- if (!np) {
- pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
return -ENODEV;
- }
sizep = of_get_property(np, "linux,sml-size", NULL);
- if (sizep == NULL) {
- pr_err("%s: ERROR - SML size not found\n", __func__);
- goto cleanup_eio;
- }
- if (*sizep == 0) {
- pr_err("%s: ERROR - event log area empty\n", __func__);
- goto cleanup_eio;
- }
-
basep = of_get_property(np, "linux,sml-base", NULL);
- if (basep == NULL) {
- pr_err("%s: ERROR - SML not found\n", __func__);
- goto cleanup_eio;
+ if (sizep == NULL && basep == NULL)
+ return -ENODEV;
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ if (*sizep == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
}
log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
- if (!log->bios_event_log) {
- pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
- __func__);
- of_node_put(np);
+ if (!log->bios_event_log)
return -ENOMEM;
- }
log->bios_event_log_end = log->bios_event_log + *sizep;
memcpy(log->bios_event_log, __va(*basep), *sizep);
- of_node_put(np);
return 0;
-
-cleanup_eio:
- of_node_put(np);
- return -EIO;
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index eaf5730d79eb..0127af130cb1 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,6 +28,8 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -354,12 +356,21 @@ static int tpm_tis_plat_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
.remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
},
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index e3bf31b37138..7993678954a2 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -180,12 +180,19 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0, burstcnt, rc;
- while (size < count &&
- wait_for_tpm_stat(chip,
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->timeout_c,
- &priv->read_queue, true) == 0) {
- burstcnt = min_t(int, get_burstcount(chip), count - size);
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + size);
@@ -229,8 +236,11 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(&chip->dev, "Error left over data\n");
@@ -271,7 +281,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
while (count < len - 1) {
- burstcnt = min_t(int, get_burstcount(chip), len - count - 1);
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
@@ -279,8 +295,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count += burstcnt;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -293,8 +312,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
if (rc < 0)
goto out_err;
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false);
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -755,20 +777,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
- /* Very early on issue a command to the TPM in polling mode to make
- * sure it works. May as well use that command to set the proper
- * timeouts for the driver.
- */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
+ /* Before doing irq testing, issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 9a940332c157..5463b58af26e 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
*
* Author: Stefan Berger <stefanb@us.ibm.com>
*
@@ -41,6 +42,7 @@ struct proxy_dev {
long state; /* internal state */
#define STATE_OPENED_FLAG BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
size_t req_len; /* length of queued TPM request */
size_t resp_len; /* length of queued TPM response */
@@ -369,12 +371,9 @@ static void vtpm_proxy_work(struct work_struct *work)
rc = tpm_chip_register(proxy_dev->chip);
if (rc)
- goto err;
-
- return;
-
-err:
- vtpm_proxy_fops_undo_open(proxy_dev);
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
}
/*
@@ -515,7 +514,8 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
*/
vtpm_proxy_fops_undo_open(proxy_dev);
- tpm_chip_unregister(proxy_dev->chip);
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
vtpm_proxy_delete_proxy_dev(proxy_dev);
}
@@ -524,6 +524,50 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Code related to the control device /dev/vtpmx
*/
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
+
/*
* vtpmx_fops_ioctl: ioctl on /dev/vtpmx
*
@@ -531,34 +575,11 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
* Returns 0 on success, a negative error code otherwise.
*/
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
+ unsigned long arg)
{
- void __user *argp = (void __user *)arg;
- struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
- struct vtpm_proxy_new_dev vtpm_new_dev;
- struct file *file;
-
switch (ioctl) {
case VTPM_PROXY_IOC_NEW_DEV:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- vtpm_new_dev_p = argp;
- if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
- sizeof(vtpm_new_dev)))
- return -EFAULT;
- file = vtpm_proxy_create_device(&vtpm_new_dev);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
- sizeof(vtpm_new_dev))) {
- put_unused_fd(vtpm_new_dev.fd);
- fput(file);
- return -EFAULT;
- }
-
- fd_install(vtpm_new_dev.fd, file);
- return 0;
-
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
default:
return -ENOIOCTLCMD;
}
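The ioctl handler above is driven from user space by the process that emulates the TPM. A minimal user-space sketch, assuming the vtpm_proxy UAPI header (linux/vtpm_proxy.h) with its flags/fd/major/minor fields; error handling is abbreviated:

/* sketch: field usage per the vtpm_proxy UAPI header; error handling abbreviated */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
	int ctrl_fd = open("/dev/vtpmx", O_RDWR);

	if (ctrl_fd < 0)
		return 1;
	if (ioctl(ctrl_fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0)
		return 1;

	/* new_dev.fd now carries the TPM commands sent by /dev/tpm<N> clients */
	printf("proxy fd %u for char device %u:%u\n",
	       new_dev.fd, new_dev.major, new_dev.minor);

	close(new_dev.fd);
	close(ctrl_fd);
	return 0;
}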
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 50072cc4fe5c..5aaa268f3a78 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
rv = setup_ring(dev, priv);
if (rv) {
chip = dev_get_drvdata(&dev->dev);
- tpm_chip_unregister(chip);
ring_free(priv);
return rv;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5649234b7316..8b00e79c2683 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -152,8 +152,8 @@ struct ports_device {
spinlock_t c_ivq_lock;
spinlock_t c_ovq_lock;
- /* The current config space is stored here */
- struct virtio_console_config config;
+ /* max. number of ports this device can hold */
+ u32 max_nr_ports;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -1649,11 +1649,11 @@ static void handle_control_message(struct virtio_device *vdev,
break;
}
if (virtio32_to_cpu(vdev, cpkt->id) >=
- portdev->config.max_nr_ports) {
+ portdev->max_nr_ports) {
dev_warn(&portdev->vdev->dev,
"Request for adding port with "
"out-of-bound id %u, max. supported id: %u\n",
- cpkt->id, portdev->config.max_nr_ports - 1);
+ cpkt->id, portdev->max_nr_ports - 1);
break;
}
add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
@@ -1894,7 +1894,7 @@ static int init_vqs(struct ports_device *portdev)
u32 i, j, nr_ports, nr_queues;
int err;
- nr_ports = portdev->config.max_nr_ports;
+ nr_ports = portdev->max_nr_ports;
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
@@ -2047,13 +2047,13 @@ static int virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.max_nr_ports = 1;
+ portdev->max_nr_ports = 1;
/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
if (!is_rproc_serial(vdev) &&
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
struct virtio_console_config, max_nr_ports,
- &portdev->config.max_nr_ports) == 0) {
+ &portdev->max_nr_ports) == 0) {
multiport = true;
}
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index c07dfe5c4da3..3e6b23c3453c 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -88,7 +88,7 @@
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_OF
/* For open firmware. */
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 5eb05dbf59b8..fc585f370549 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -768,5 +768,5 @@ fail:
kfree(clks);
iounmap(base);
}
-CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
-CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 97ae60fa1584..bb8a77a5985f 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -448,12 +448,20 @@ EXPORT_SYMBOL(clk_register_clkdev);
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
+ *
+ * To make things easier for mass registration, we detect error clk_hws
+ * from a previous clk_hw_register_*() call, and return the error code for
+ * those. This is to permit this function to be called immediately
+ * after clk_hw_register_*().
*/
int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
const char *dev_id)
{
struct clk_lookup *cl;
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/*
* Since dev_id can be NULL, and NULL is handled specially, we must
* pass it as either a NULL format string, or with "%s".
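With the IS_ERR() forwarding described in the comment above, a provider can register a clk_hw and its clkdev lookup back to back without an intermediate error check. A short sketch under that assumption; the clock name, rate, and con_id below are illustrative only:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>

/* example only: clock name, rate and con_id are made up */
static int example_register_uart_ref(struct device *dev)
{
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_rate(dev, "uart_ref", NULL, 0, 48000000);

	/* if hw is an ERR_PTR, clk_hw_register_clkdev() just returns its code */
	return clk_hw_register_clkdev(hw, "uart_ref", NULL);
}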
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 6a964144a5b5..cbce308aad04 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <soc/imx/revision.h>
#include <soc/imx/timer.h>
#include <asm/irq.h>
@@ -72,14 +73,8 @@ static struct clk ** const uart_clks[] __initconst = {
NULL
};
-static void __init _mx31_clocks_init(unsigned long fref)
+static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
{
- void __iomem *base;
- struct device_node *np;
-
- base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
- BUG_ON(!base);
-
clk[dummy] = imx_clk_fixed("dummy", 0);
clk[ckih] = imx_clk_fixed("ckih", fref);
clk[ckil] = imx_clk_fixed("ckil", 32768);
@@ -147,21 +142,17 @@ static void __init _mx31_clocks_init(unsigned long fref)
clk_prepare_enable(clk[iim_gate]);
mx31_revision();
clk_disable_unprepare(clk[iim_gate]);
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx31-ccm");
-
- if (np) {
- clk_data.clks = clk;
- clk_data.clk_num = ARRAY_SIZE(clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- }
}
-int __init mx31_clocks_init(void)
+int __init mx31_clocks_init(unsigned long fref)
{
- u32 fref = 26000000; /* default */
+ void __iomem *base;
+
+ base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
+ if (!base)
+ panic("%s: failed to map registers\n", __func__);
- _mx31_clocks_init(fref);
+ _mx31_clocks_init(base, fref);
clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
@@ -224,22 +215,31 @@ int __init mx31_clocks_init(void)
return 0;
}
-int __init mx31_clocks_init_dt(void)
+static void __init mx31_clocks_init_dt(struct device_node *np)
{
- struct device_node *np;
+ struct device_node *osc_np;
u32 fref = 26000000; /* default */
+ void __iomem *ccm;
- for_each_compatible_node(np, NULL, "fixed-clock") {
- if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+ for_each_compatible_node(osc_np, NULL, "fixed-clock") {
+ if (!of_device_is_compatible(osc_np, "fsl,imx-osc26m"))
continue;
- if (!of_property_read_u32(np, "clock-frequency", &fref)) {
- of_node_put(np);
+ if (!of_property_read_u32(osc_np, "clock-frequency", &fref)) {
+ of_node_put(osc_np);
break;
}
}
- _mx31_clocks_init(fref);
+ ccm = of_iomap(np, 0);
+ if (!ccm)
+ panic("%s: failed to map registers\n", __func__);
- return 0;
+ _mx31_clocks_init(ccm, fref);
+
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
+
+CLK_OF_DECLARE(imx31_ccm, "fsl,imx31-ccm", mx31_clocks_init_dt);
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index c53993b6bf87..6416c1f8e632 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -322,7 +322,7 @@ static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
- DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+ DUMMY_CLK("OSTIMER0", NULL, "osc_3_6864mhz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 9375777776d9..b533f99550e1 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -37,12 +37,14 @@
* @smstpcr: module stop control register
* @mstpsr: module stop status register (optional)
* @lock: protects writes to SMSTPCR
+ * @width_8bit: registers are 8-bit, not 32-bit
*/
struct mstp_clock_group {
struct clk_onecell_data data;
void __iomem *smstpcr;
void __iomem *mstpsr;
spinlock_t lock;
+ bool width_8bit;
};
/**
@@ -59,6 +61,18 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
+ u32 __iomem *reg)
+{
+ return group->width_8bit ? readb(reg) : clk_readl(reg);
+}
+
+static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
+ u32 __iomem *reg)
+{
+ group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
+}
+
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_lock_irqsave(&group->lock, flags);
- value = clk_readl(group->smstpcr);
+ value = cpg_mstp_read(group, group->smstpcr);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- clk_writel(value, group->smstpcr);
+ cpg_mstp_write(group, value, group->smstpcr);
spin_unlock_irqrestore(&group->lock, flags);
@@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (!(clk_readl(group->mstpsr) & bitmask))
+ if (!(cpg_mstp_read(group, group->mstpsr) & bitmask))
break;
cpu_relax();
}
@@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
u32 value;
if (group->mstpsr)
- value = clk_readl(group->mstpsr);
+ value = cpg_mstp_read(group, group->mstpsr);
else
- value = clk_readl(group->smstpcr);
+ value = cpg_mstp_read(group, group->smstpcr);
return !(value & BIT(clock->bit_index));
}
@@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
return;
}
+ if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks"))
+ group->width_8bit = true;
+
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43cf8ca..4866f7aa32e6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -282,6 +282,26 @@ config CLKSRC_MPS2
select CLKSRC_MMIO
select CLKSRC_OF
+config ARC_TIMERS
+ bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_OF
+ help
+ These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
+ (ARC700 as well as ARC HS38).
+ TIMER0 serves as the clockevent, while TIMER1 provides the clocksource.
+
+config ARC_TIMERS_64BIT
+ bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARC_TIMERS
+ select CLKSRC_OF
+ help
+ This enables two different 64-bit timers: RTC (for UP) and GFRC (for SMP).
+ RTC is implemented inside the core, while GFRC sits outside the core in
+ the ARConnect IP block. The driver automatically picks one of them as the
+ clocksource, as appropriate.
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cf87f407f1ad..a14111e1f087 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
+obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 28037d0b8dcd..1961e3539b57 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void)
return v2;
}
-static cycle_t acpi_pm_read(struct clocksource *cs)
+static u64 acpi_pm_read(struct clocksource *cs)
{
- return (cycle_t)read_pmtmr();
+ return (u64)read_pmtmr();
}
static struct clocksource clocksource_acpi_pm = {
.name = "acpi_pm",
.rating = 200,
.read = acpi_pm_read,
- .mask = (cycle_t)ACPI_PM_MASK,
+ .mask = (u64)ACPI_PM_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str)
}
__setup("acpi_pm_good", acpi_pm_good_setup);
-static cycle_t acpi_pm_read_slow(struct clocksource *cs)
+static u64 acpi_pm_read_slow(struct clocksource *cs)
{
- return (cycle_t)acpi_pm_read_verified();
+ return (u64)acpi_pm_read_verified();
}
static inline void acpi_pm_need_workaround(void)
@@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
*/
static int verify_pmtmr_rate(void)
{
- cycle_t value1, value2;
+ u64 value1, value2;
unsigned long count, delta;
mach_prepare_counter();
@@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void)
static int __init init_acpi_pm_clocksource(void)
{
- cycle_t value1, value2;
+ u64 value1, value2;
unsigned int i, j = 0;
if (!pmtmr_ioport)
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
new file mode 100644
index 000000000000..7517f959cba7
--- /dev/null
+++ b/drivers/clocksource/arc_timer.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has two independent 32-bit programmable timers, TIMER0 and TIMER1.
+ * Each can be programmed to count from @count to @limit and optionally
+ * interrupt. We've designated TIMER0 for clockevents and TIMER1 for clocksource.
+ *
+ * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP),
+ * which are suitable as UP and SMP clocksources respectively.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <soc/arc/timers.h>
+#include <soc/arc/mcip.h>
+
+
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("timer missing clk");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Couldn't enable parent clk\n");
+ return ret;
+ }
+
+ arc_timer_freq = clk_get_rate(clk);
+
+ return 0;
+}
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_TIMERS_64BIT
+
+static u64 arc_read_gfrc(struct clocksource *cs)
+{
+ unsigned long flags;
+ u32 l, h;
+
+ local_irq_save(flags);
+
+ __mcip_cmd(CMD_GFRC_READ_LO, 0);
+ l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ __mcip_cmd(CMD_GFRC_READ_HI, 0);
+ h = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ local_irq_restore(flags);
+
+ return (((u64)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_gfrc = {
+ .name = "ARConnect GFRC",
+ .rating = 400,
+ .read = arc_read_gfrc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_gfrc(struct device_node *node)
+{
+ struct mcip_bcr mp;
+ int ret;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+ if (!mp.gfrc) {
+ pr_warn("Global-64-bit-Ctr clocksource not detected");
+ return -ENXIO;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#define AUX_RTC_CTRL 0x103
+#define AUX_RTC_LOW 0x104
+#define AUX_RTC_HIGH 0x105
+
+static u64 arc_read_rtc(struct clocksource *cs)
+{
+ unsigned long status;
+ u32 l, h;
+
+ /*
+ * hardware has an internal state machine which tracks readout of
+ * low/high and updates the CTRL.status if
+ * - interrupt/exception taken between the two reads
+ * - high increments after low has been read
+ */
+ do {
+ l = read_aux_reg(AUX_RTC_LOW);
+ h = read_aux_reg(AUX_RTC_HIGH);
+ status = read_aux_reg(AUX_RTC_CTRL);
+ } while (!(status & _BITUL(31)));
+
+ return (((u64)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_rtc = {
+ .name = "ARCv2 RTC",
+ .rating = 350,
+ .read = arc_read_rtc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_rtc(struct device_node *node)
+{
+ struct bcr_timer timer;
+ int ret;
+
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ if (!timer.rtc) {
+ pr_warn("Local-64-bit-Ctr clocksource not detected");
+ return -ENXIO;
+ }
+
+ /* Local to CPU hence not usable in SMP */
+ if (IS_ENABLED(CONFIG_SMP)) {
+ pr_warn("Local-64-bit-Ctr not usable in SMP");
+ return -EINVAL;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ write_aux_reg(AUX_RTC_CTRL, 1);
+
+ return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
+
+/*
+ * 32-bit TIMER1 keeps counting monotonically and wraps around
+ */
+
+static u64 arc_read_timer1(struct clocksource *cs)
+{
+ return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter_timer1 = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_read_timer1,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_timer1(struct device_node *node)
+{
+ int ret;
+
+ /* Local to CPU hence not usable in SMP */
+ if (IS_ENABLED(CONFIG_SMP))
+ return -EINVAL;
+
+ ret = arc_get_timer_clk(node);
+ if (ret)
+ return ret;
+
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
+
+/********** Clock Event Device *********/
+
+static int arc_timer_irq;
+
+/*
+ * Arm the timer to interrupt after @cycles.
+ * The oneshot/periodic distinction is handled in the timer irq handler below.
+ */
+static void arc_timer_event_setup(unsigned int cycles)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
+{
+ /*
+ * At X Hz, 1 sec = 1000ms -> X cycles;
+ * 10ms -> X / 100 cycles
+ */
+ arc_timer_event_setup(arc_timer_freq / HZ);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_state_periodic = arc_clkevent_set_periodic,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ /*
+ * Note that generic IRQ core could have passed @evt for @dev_id if
+ * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
+ */
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+ int irq_reenable = clockevent_state_periodic(evt);
+
+ /*
+ * Any write to the CTRL reg ACKs the interrupt; we rewrite the
+ * [N]ot [H]alted bit and, if periodic, re-arm the timer via the
+ * [I]nterrupt [E]nable bit.
+ */
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+
+static int arc_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+ evt->cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
+ enable_percpu_irq(arc_timer_irq, 0);
+ return 0;
+}
+
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(arc_timer_irq);
+ return 0;
+}
+
+/*
+ * clockevent setup for boot CPU
+ */
+static int __init arc_clockevent_setup(struct device_node *node)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+ int ret;
+
+ arc_timer_irq = irq_of_parse_and_map(node, 0);
+ if (arc_timer_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
+ }
+
+ ret = arc_get_timer_clk(node);
+ if (ret) {
+ pr_err("clockevent: missing clk");
+ return ret;
+ }
+
+ /* Needs irq_set_percpu_devid() done beforehand in the intc map function */
+ ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+ "Timer0 (per-cpu-tick)", evt);
+ if (ret) {
+ pr_err("clockevent: unable to request irq\n");
+ return ret;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+ "clockevents/arc/timer:starting",
+ arc_timer_starting_cpu,
+ arc_timer_dying_cpu);
+ if (ret) {
+ pr_err("Failed to setup hotplug state");
+ return ret;
+ }
+ return 0;
+}
+
+static int __init arc_of_timer_init(struct device_node *np)
+{
+ static int init_count = 0;
+ int ret;
+
+ if (!init_count) {
+ init_count = 1;
+ ret = arc_clockevent_setup(np);
+ } else {
+ ret = arc_cs_setup_timer1(np);
+ }
+
+ return ret;
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
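Taking the periodic path above as a worked example: with an illustrative 50 MHz timer clock and HZ=100, arc_clkevent_set_periodic() programs TIMER0 to fire every arc_timer_freq / HZ = 500000 cycles, i.e. every 10 ms (the figures are assumptions, not values from this patch):

/* Illustrative figures only; neither value comes from the patch. */
#define EXAMPLE_TIMER_FREQ	50000000UL	/* 50 MHz input clock */
#define EXAMPLE_HZ		100UL		/* 100 ticks per second */

/* value written to ARC_REG_TIMER0_LIMIT for one periodic tick: 500000 */
#define EXAMPLE_TICK_CYCLES	(EXAMPLE_TIMER_FREQ / EXAMPLE_HZ)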
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 02fef6830e72..4c8c3fb2e8b2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -562,12 +562,12 @@ static u64 arch_counter_get_cntvct_mem(void)
*/
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
-static cycle_t arch_counter_read(struct clocksource *cs)
+static u64 arch_counter_read(struct clocksource *cs)
{
return arch_timer_read_counter();
}
-static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
return arch_timer_read_counter();
}
@@ -738,7 +738,7 @@ static int __init arch_timer_register(void)
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
- "AP_ARM_ARCH_TIMER_STARTING",
+ "clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 8da03298f844..123ed20ac2ff 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu)
return 0;
}
-static cycle_t gt_clocksource_read(struct clocksource *cs)
+static u64 gt_clocksource_read(struct clocksource *cs)
{
return gt_counter_read();
}
@@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_irq;
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
- "AP_ARM_GLOBAL_TIMER_STARTING",
+ "clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index fbfbdec13b08..44e5e951583b 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -158,11 +158,11 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
*
* returns: Current timer counter register value
**/
-static cycle_t __ttc_clocksource_read(struct clocksource *cs)
+static u64 __ttc_clocksource_read(struct clocksource *cs)
{
struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
- return (cycle_t)readl_relaxed(timer->base_addr +
+ return (u64)readl_relaxed(timer->base_addr +
TTC_COUNT_VAL_OFFSET);
}
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 77a365f573d7..c69e2772658d 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -30,7 +30,7 @@
static void __iomem *clksrc_dbx500_timer_base;
-static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
+static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
{
void __iomem *base = clksrc_dbx500_timer_base;
u32 count, count2;
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 89f1c2edbe02..01f3f5a59bc6 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu)
static int __init dummy_timer_register(void)
{
return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
- "AP_DUMMY_TIMER_STARTING",
+ "clockevents/dummy_timer:starting",
dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 797505aa2ba4..63e4f5519577 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
dw_apb_clocksource_read(dw_cs);
}
-static cycle_t __apbt_read_clocksource(struct clocksource *cs)
+static u64 __apbt_read_clocksource(struct clocksource *cs)
{
u32 current_count;
struct dw_apb_clocksource *dw_cs =
@@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs)
current_count = apbt_readl_relaxed(&dw_cs->timer,
APBTMR_N_CURRENT_VALUE);
- return (cycle_t)~current_count;
+ return (u64)~current_count;
}
static void apbt_restart_clocksource(struct clocksource *cs)
@@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
*
* @dw_cs: The clocksource to read.
*/
-cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
+u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
{
- return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
+ return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 19bb1792d647..aff87df07449 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p)
clk_disable_unprepare(p->clk);
}
-static cycle_t em_sti_count(struct em_sti_priv *p)
+static u64 em_sti_count(struct em_sti_priv *p)
{
- cycle_t ticks;
+ u64 ticks;
unsigned long flags;
/* the STI hardware buffers the 48-bit count, but to
@@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p)
* Always read STI_COUNT_H before STI_COUNT_L.
*/
raw_spin_lock_irqsave(&p->lock, flags);
- ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+ ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
ticks |= em_sti_read(p, STI_COUNT_L);
raw_spin_unlock_irqrestore(&p->lock, flags);
return ticks;
}
-static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
{
unsigned long flags;
@@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
return container_of(cs, struct em_sti_priv, cs);
}
-static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+static u64 em_sti_clocksource_read(struct clocksource *cs)
{
return em_sti_count(cs_to_em_sti(cs));
}
@@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct em_sti_priv *p = ced_to_em_sti(ced);
- cycle_t next;
+ u64 next;
int safe;
next = em_sti_set_next(p, em_sti_count(p) + delta);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8f3488b80896..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void)
hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
} while (hi != hi2);
- return ((cycle_t)hi << 32) | lo;
+ return ((u64)hi << 32) | lo;
}
/**
@@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void)
return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static u64 exynos4_frc_read(struct clocksource *cs)
{
return exynos4_read_count_32();
}
@@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void)
static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
unsigned int tcon;
- cycle_t comp_cycle;
+ u64 comp_cycle;
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
@@ -552,7 +553,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
- "AP_EXYNOS4_MCT_TIMER_STARTING",
+ "clockevents/exynos4/mct_timer:starting",
exynos4_mct_starting_cpu,
exynos4_mct_dying_cpu);
if (err)
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 07d9d5be9054..5b27fb9997c2 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs)
return container_of(cs, struct timer16_priv, cs);
}
-static cycle_t timer16_clocksource_read(struct clocksource *cs)
+static u64 timer16_clocksource_read(struct clocksource *cs)
{
struct timer16_priv *p = cs_to_priv(cs);
unsigned long raw, value;
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 7bdf1991c847..72e1cf2b3096 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs)
return container_of(cs, struct tpu_priv, cs);
}
-static cycle_t tpu_clocksource_read(struct clocksource *cs)
+static u64 tpu_clocksource_read(struct clocksource *cs)
{
struct tpu_priv *p = cs_to_priv(cs);
unsigned long flags;
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 0efd36e483ab..64f6490740d7 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock);
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
-static cycle_t i8253_read(struct clocksource *cs)
+static u64 i8253_read(struct clocksource *cs)
{
static int old_count;
static u32 old_jifs;
@@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs)
count = (PIT_LATCH - 1) - count;
- return (cycle_t)(jifs * PIT_LATCH) + count;
+ return (u64)(jifs * PIT_LATCH) + count;
}
static struct clocksource i8253_cs = {
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 54e1665aa03c..7c61226f4359 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void)
return seclo * NSEC_PER_SEC + nsec;
}
-static cycle_t jcore_clocksource_read(struct clocksource *cs)
+static u64 jcore_clocksource_read(struct clocksource *cs)
{
return jcore_sched_clock_read();
}
@@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node)
}
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
- "AP_JCORE_TIMER_STARTING",
+ "clockevents/jcore:starting",
jcore_pit_local_init, NULL);
return 0;
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index a80ab3e446b7..6fcf96540631 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta,
return 0;
}
-static cycle_t metag_clocksource_read(struct clocksource *cs)
+static u64 metag_clocksource_read(struct clocksource *cs)
{
return __core_reg_get(TXTIMER);
}
@@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void)
/* Hook cpu boot to configure the CPU's timers */
return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
- "AP_METAG_TIMER_STARTING",
+ "clockevents/metag:starting",
arch_timer_starting_cpu, NULL);
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 7a960cd01104..d9ef7a61e093 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -120,12 +120,12 @@ static int gic_clockevent_init(void)
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
- "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
- gic_dying_cpu);
+ "clockevents/mips/gic/timer:starting",
+ gic_starting_cpu, gic_dying_cpu);
return 0;
}
-static cycle_t gic_hpt_read(struct clocksource *cs)
+static u64 gic_hpt_read(struct clocksource *cs)
{
return gic_read_count();
}
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index c4f7d7a9b689..4c4df981d8cc 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
return container_of(c, struct clocksource_mmio, clksrc);
}
-cycle_t clocksource_mmio_readl_up(struct clocksource *c)
+u64 clocksource_mmio_readl_up(struct clocksource *c)
{
- return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg);
+ return (u64)readl_relaxed(to_mmio_clksrc(c)->reg);
}
-cycle_t clocksource_mmio_readl_down(struct clocksource *c)
+u64 clocksource_mmio_readl_down(struct clocksource *c)
{
- return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+ return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
}
-cycle_t clocksource_mmio_readw_up(struct clocksource *c)
+u64 clocksource_mmio_readw_up(struct clocksource *c)
{
- return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg);
+ return (u64)readw_relaxed(to_mmio_clksrc(c)->reg);
}
-cycle_t clocksource_mmio_readw_down(struct clocksource *c)
+u64 clocksource_mmio_readw_down(struct clocksource *c)
{
- return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+ return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
}
/**
@@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c)
*/
int __init clocksource_mmio_init(void __iomem *base, const char *name,
unsigned long hz, int rating, unsigned bits,
- cycle_t (*read)(struct clocksource *))
+ u64 (*read)(struct clocksource *))
{
struct clocksource_mmio *cs;
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 2a8f4705c734..7f3430654fbd 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -161,19 +161,22 @@ static int __init moxart_timer_init(struct device_node *node)
timer->base = of_iomap(node, 0);
if (!timer->base) {
pr_err("%s: of_iomap failed\n", node->full_name);
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_free;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unmap;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", node->full_name);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto out_unmap;
}
pclk = clk_get_rate(clk);
@@ -186,7 +189,8 @@ static int __init moxart_timer_init(struct device_node *node)
timer->t1_disable_val = ASPEED_TIMER1_DISABLE;
} else {
pr_err("%s: unknown platform\n", node->full_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unmap;
}
timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -208,14 +212,14 @@ static int __init moxart_timer_init(struct device_node *node)
clocksource_mmio_readl_down);
if (ret) {
pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
- return ret;
+ goto out_unmap;
}
ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER,
node->name, &timer->clkevt);
if (ret) {
pr_err("%s: setup_irq failed\n", node->full_name);
- return ret;
+ goto out_unmap;
}
/* Clear match registers */
@@ -241,6 +245,12 @@ static int __init moxart_timer_init(struct device_node *node)
clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe);
return 0;
+
+out_unmap:
+ iounmap(timer->base);
+out_free:
+ kfree(timer);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 0ba0a913b41d..99b77aff0839 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void)
HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
}
-static cycle_t timrotv1_get_cycles(struct clocksource *cs)
+static u64 timrotv1_get_cycles(struct clocksource *cs)
{
return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
& 0xffff0000) >> 16);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 3e1cb512f3ce..9cae38eebec2 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -220,17 +220,16 @@ CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
/*
* Legacy timer init for non device-tree boards.
*/
-void __init pxa_timer_nodt_init(int irq, void __iomem *base,
- unsigned long clock_tick_rate)
+void __init pxa_timer_nodt_init(int irq, void __iomem *base)
{
struct clk *clk;
timer_base = base;
clk = clk_get(NULL, "OSTIMER0");
- if (clk && !IS_ERR(clk))
+ if (clk && !IS_ERR(clk)) {
clk_prepare_enable(clk);
- else
+ pxa_timer_common_init(irq, clk_get_rate(clk));
+ } else {
pr_crit("%s: unable to get clk\n", __func__);
-
- pxa_timer_common_init(irq, clock_tick_rate);
+ }
}
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 3283cfa2aa52..ee358cdf4a07 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt;
static void __iomem *source_base;
-static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+static notrace u64 msm_read_timer_count(struct clocksource *cs)
{
return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
@@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
} else {
/* Install and invoke hotplug callbacks */
res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
- "AP_QCOM_TIMER_STARTING",
+ "clockevents/qcom/timer:starting",
msm_local_timer_starting_cpu,
msm_local_timer_dying_cpu);
if (res) {
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 54565bd0093b..0093ece661fe 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
samsung_time_start(pwm.source_id, true);
}
-static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
+static u64 notrace samsung_clocksource_read(struct clocksource *c)
{
return ~readl_relaxed(pwm.source_reg);
}
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index 64f9e8294434..a46660bf6588 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
/* The base timer frequency, * 27 if selected */
#define HRT_FREQ 1000000
-static cycle_t read_hrt(struct clocksource *cs)
+static u64 read_hrt(struct clocksource *cs)
{
/* Read the timer value */
- return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
+ return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
}
static struct clocksource cs_hrt = {
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 103c49362c68..28757edf6aca 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
return container_of(cs, struct sh_cmt_channel, cs);
}
-static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
unsigned long flags, raw;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 469e776ec17a..1fbf2aadcfd4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
return container_of(cs, struct sh_tmu_channel, cs);
}
-static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
+static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 4da2af9694a2..d4ca9962a759 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -41,7 +41,7 @@
static void __iomem *tcaddr;
-static cycle_t tc_get_cycles(struct clocksource *cs)
+static u64 tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
u32 lower, upper;
@@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
-static cycle_t tc_get_cycles32(struct clocksource *cs)
+static u64 tc_get_cycles32(struct clocksource *cs)
{
return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 3c39e6f45971..4440aefc59cd 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
}
res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
- "AP_ARMADA_TIMER_STARTING",
+ "clockevents/armada:starting",
armada_370_xp_timer_starting_cpu,
armada_370_xp_timer_dying_cpu);
if (res) {
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index a8e6c7df853d..3710e4d9dcba 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
writel(value, base + 0x20 * gpt_id + offset);
}
-static cycle_t notrace
+static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
@@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
- return (cycle_t)~counter;
+ return (u64)~counter;
}
static u64 notrace pistachio_read_sched_clock(void)
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 4334e0330ada..3d8a181f0252 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
}
/* read 64-bit timer counter */
-static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+static u64 sirfsoc_timer_read(struct clocksource *cs)
{
u64 cycles;
@@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void)
/* Install and invoke hotplug callbacks */
return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
- "AP_MARCO_TIMER_STARTING",
+ "clockevents/marco:starting",
sirfsoc_local_timer_starting_cpu,
sirfsoc_local_timer_dying_cpu);
}
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 6555821bbdae..c0b5df3167a0 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign
* Clocksource: just a monotonic counter of MCK/16 cycles.
* We don't care whether or not PIT irqs are enabled.
*/
-static cycle_t read_pit_clk(struct clocksource *cs)
+static u64 read_pit_clk(struct clocksource *cs)
{
struct pit_data *data = clksrc_to_pit_data(cs);
unsigned long flags;
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index e90ab5b63a90..be4ac7604136 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
-static cycle_t read_clk32k(struct clocksource *cs)
+static u64 read_clk32k(struct clocksource *cs)
{
return read_CRTR();
}
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 70c149af8ee0..da1f7986e477 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -46,35 +46,62 @@
/* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */
static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
-static unsigned long nps_timer_rate;
+static int __init nps_get_timer_clk(struct device_node *node,
+ unsigned long *timer_freq,
+ struct clk **clk)
+{
+ int ret;
+
+ *clk = of_clk_get(node, 0);
+ ret = PTR_ERR_OR_ZERO(*clk);
+ if (ret) {
+ pr_err("timer missing clk");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(*clk);
+ if (ret) {
+ pr_err("Couldn't enable parent clk\n");
+ clk_put(*clk);
+ return ret;
+ }
+
+ *timer_freq = clk_get_rate(*clk);
+ if (!(*timer_freq)) {
+ pr_err("Couldn't get clk rate\n");
+ clk_disable_unprepare(*clk);
+ clk_put(*clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
-static cycle_t nps_clksrc_read(struct clocksource *clksrc)
+static u64 nps_clksrc_read(struct clocksource *clksrc)
{
int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
- return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
+ return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
}
-static int __init nps_setup_clocksource(struct device_node *node,
- struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node)
{
int ret, cluster;
+ struct clk *clk;
+ unsigned long nps_timer1_freq;
+
for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
nps_msu_reg_low_addr[cluster] =
nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
- NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+ NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
+ ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
+ if (ret)
return ret;
- }
-
- nps_timer_rate = clk_get_rate(clk);
- ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
- nps_timer_rate, 301, 32, nps_clksrc_read);
+ ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
+ nps_timer1_freq, 300, 32, nps_clksrc_read);
if (ret) {
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
@@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node,
return ret;
}
-static int __init nps_timer_init(struct device_node *node)
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+ nps_setup_clocksource);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+ nps_setup_clocksource);
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#include <soc/nps/mtm.h>
+
+/* Timer related Aux registers */
+#define NPS_REG_TIMER0_TSI 0xFFFFF850
+#define NPS_REG_TIMER0_LIMIT 0x23
+#define NPS_REG_TIMER0_CTRL 0x22
+#define NPS_REG_TIMER0_CNT 0x21
+
+/*
+ * Interrupt Enabled (IE) - re-arm the timer
+ * Not Halted (NH) - is cleared when working with JTAG (for debug)
+ */
+#define TIMER0_CTRL_IE BIT(0)
+#define TIMER0_CTRL_NH BIT(1)
+
+static unsigned long nps_timer0_freq;
+static unsigned long nps_timer0_irq;
+
+static void nps_clkevent_rm_thread(void)
+{
+ int thread;
+ unsigned int cflags, enabled_threads;
+
+ hw_schd_save(&cflags);
+
+ enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+
+ /* remove thread from TSI1 */
+ thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+ enabled_threads &= ~(1 << thread);
+ write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+ /* Acknowledge and if needed re-arm the timer */
+ if (!enabled_threads)
+ write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
+ else
+ write_aux_reg(NPS_REG_TIMER0_CTRL,
+ TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+ hw_schd_restore(cflags);
+}
+
+static void nps_clkevent_add_thread(unsigned long delta)
+{
+ int thread;
+ unsigned int cflags, enabled_threads;
+
+ hw_schd_save(&cflags);
+
+ /* add thread to TSI1 */
+ thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+ enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+ enabled_threads |= (1 << thread);
+ write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+ /* set next timer event */
+ write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
+ write_aux_reg(NPS_REG_TIMER0_CNT, 0);
+ write_aux_reg(NPS_REG_TIMER0_CTRL,
+ TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+ hw_schd_restore(cflags);
+}
+
+/*
+ * Whenever anyone tries to change modes, we just mask interrupts
+ * and wait for the next event to get set.
+ */
+static int nps_clkevent_set_state(struct clock_event_device *dev)
+{
+ nps_clkevent_rm_thread();
+ disable_percpu_irq(nps_timer0_irq);
+
+ return 0;
+}
+
+static int nps_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ nps_clkevent_add_thread(delta);
+ enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
+ .name = "NPS Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = nps_clkevent_set_next_event,
+ .set_state_oneshot = nps_clkevent_set_state,
+ .set_state_oneshot_stopped = nps_clkevent_set_state,
+ .set_state_shutdown = nps_clkevent_set_state,
+ .tick_resume = nps_clkevent_set_state,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ nps_clkevent_rm_thread();
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int nps_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
+
+ evt->cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
+ enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int nps_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(nps_timer0_irq);
+ return 0;
+}
+
+static int __init nps_setup_clockevent(struct device_node *node)
{
struct clk *clk;
+ int ret;
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock.\n");
- return PTR_ERR(clk);
+ nps_timer0_irq = irq_of_parse_and_map(node, 0);
+ if (nps_timer0_irq <= 0) {
+ pr_err("clockevent: missing irq");
+ return -EINVAL;
}
- return nps_setup_clocksource(node, clk);
+ ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
+ if (ret)
+ return ret;
+
+ /* Needs a priori irq_set_percpu_devid() done in intc map function */
+ ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
+ "Timer0 (per-cpu-tick)",
+ &nps_clockevent_device);
+ if (ret) {
+ pr_err("Couldn't request irq\n");
+ clk_disable_unprepare(clk);
+ return ret;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+ "clockevents/nps:starting",
+ nps_timer_starting_cpu,
+ nps_timer_dying_cpu);
+ if (ret) {
+ pr_err("Failed to setup hotplug state");
+ clk_disable_unprepare(clk);
+ free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
+ return ret;
+ }
+
+ return 0;
}
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
- nps_timer_init);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+ nps_setup_clockevent);
+#endif /* CONFIG_EZNPS_MTM_EXT */
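The clockevent path above arms TIMER0 per hardware thread through the TSI mask: a thread sets its bit when it programs a new event and clears it from the interrupt handler, and IE stays set only while at least one bit remains. The following is a minimal, stand-alone C sketch of that bookkeeping for reference only; the names (model_add_thread, model_rm_thread) are hypothetical, the real driver additionally reprograms LIMIT/CNT, and its aux-register accesses and hw_schd_save()/hw_schd_restore() serialization are replaced here by plain variables.

/*
 * Hypothetical user-space model of the TSI bookkeeping done by
 * nps_clkevent_add_thread()/nps_clkevent_rm_thread() above.
 */
#include <stdint.h>
#include <stdio.h>

#define TIMER0_CTRL_IE (1u << 0)	/* interrupt enable (re-arm) */
#define TIMER0_CTRL_NH (1u << 1)	/* not halted */

static uint32_t tsi_mask;		/* stands in for NPS_REG_TIMER0_TSI */
static uint32_t timer_ctrl;		/* stands in for NPS_REG_TIMER0_CTRL */

static void model_add_thread(int thread)
{
	tsi_mask |= 1u << thread;
	/* the real code also writes LIMIT and resets CNT here */
	timer_ctrl = TIMER0_CTRL_IE | TIMER0_CTRL_NH;
}

static void model_rm_thread(int thread)
{
	tsi_mask &= ~(1u << thread);
	/* keep IE only while some thread still wants the tick */
	timer_ctrl = tsi_mask ? (TIMER0_CTRL_IE | TIMER0_CTRL_NH)
			      : TIMER0_CTRL_NH;
}

int main(void)
{
	model_add_thread(0);
	model_add_thread(3);
	model_rm_thread(0);
	printf("mask=0x%x ctrl=0x%x\n", tsi_mask, timer_ctrl); /* mask=0x8, IE still set */
	model_rm_thread(3);
	printf("mask=0x%x ctrl=0x%x\n", tsi_mask, timer_ctrl); /* mask=0x0, IE cleared */
	return 0;
}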
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index c32148ec7a38..bfa981ac1eaf 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -72,7 +72,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
}
/* read 64-bit timer counter */
-static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs)
+static u64 notrace sirfsoc_timer_read(struct clocksource *cs)
{
u64 cycles;
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 4f87f3e76d83..a3e662b15964 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,7 +152,7 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+static u64 sun5i_clksrc_read(struct clocksource *clksrc)
{
struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index cf5b14e442e4..624067712ef0 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -65,11 +65,11 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs)
return container_of(cs, struct ti_32k, cs);
}
-static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs)
+static u64 notrace ti_32k_read_cycles(struct clocksource *cs)
{
struct ti_32k *ti = to_ti_32k(cs);
- return (cycle_t)readl_relaxed(ti->counter);
+ return (u64)readl_relaxed(ti->counter);
}
static struct ti_32k ti_32k_timer = {
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index b15069483fbd..d02b51075ad1 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -53,7 +53,7 @@
static void __iomem *regbase;
-static cycle_t vt8500_timer_read(struct clocksource *cs)
+static u64 vt8500_timer_read(struct clocksource *cs)
{
int loops = msecs_to_loops(10);
writel(3, regbase + TIMER_CTRL_VAL);
@@ -75,7 +75,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
int loops = msecs_to_loops(10);
- cycle_t alarm = clocksource.read(&clocksource) + cycles;
+ u64 alarm = clocksource.read(&clocksource) + cycles;
while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
&& --loops)
cpu_relax();
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a98702b7445..3a2ca0f79daf 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -930,7 +930,7 @@ static void __init acpi_cpufreq_boost_init(void)
static void acpi_cpufreq_boost_exit(void)
{
- if (acpi_cpufreq_online >= 0)
+ if (acpi_cpufreq_online > 0)
cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bc97b6a4b1cf..7fcaf26e8f81 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
+ { .compatible = "apm,xgene-shadowcat", },
+
{ .compatible = "arm,integrator-ap", },
{ .compatible = "arm,integrator-cp", },
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 759612da4fdc..e28a31a40829 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -18,7 +18,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pal.h>
#include <linux/acpi.h>
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6acbd4af632e..f91c25718d16 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -857,13 +857,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_hwp_set(const struct cpumask *cpumask)
+static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
struct perf_limits *perf_limits = limits;
u64 value, cap;
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu(cpu, policy->cpus) {
int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@@ -949,7 +949,7 @@ skip_epp:
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
if (hwp_active)
- intel_pstate_hwp_set(policy->cpus);
+ intel_pstate_hwp_set(policy);
return 0;
}
@@ -968,19 +968,28 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
+ int ret;
+
if (!hwp_active)
return 0;
+ mutex_lock(&intel_pstate_limits_lock);
+
all_cpu_data[policy->cpu]->epp_policy = 0;
- return intel_pstate_hwp_set_policy(policy);
+ ret = intel_pstate_hwp_set_policy(policy);
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
}
-static void intel_pstate_hwp_set_online_cpus(void)
+static void intel_pstate_update_policies(void)
{
- get_online_cpus();
- intel_pstate_hwp_set(cpu_online_mask);
- put_online_cpus();
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpufreq_update_policy(cpu);
}
/************************** debugfs begin ************************/
@@ -1018,10 +1027,6 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
- if (hwp_active ||
- pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
- return;
-
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@@ -1105,11 +1110,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -1134,11 +1138,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -1163,11 +1166,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -2153,8 +2155,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
if (per_cpu_limits)
perf_limits = cpu->perf_limits;
+ mutex_lock(&intel_pstate_limits_lock);
+
intel_pstate_update_perf_limits(policy, perf_limits);
+ mutex_unlock(&intel_pstate_limits_lock);
+
return 0;
}
@@ -2487,7 +2493,10 @@ hwp_cpu_matched:
if (rc)
goto out;
- intel_pstate_debug_expose_params();
+ if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+ pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+ intel_pstate_debug_expose_params();
+
intel_pstate_sysfs_expose_params();
if (hwp_active)
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 176e84cc3991..0cb9040eca49 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -107,7 +107,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
+static void s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index fe8f08948fcb..ac321f09e717 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -19,7 +19,7 @@
#include <linux/tick.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f2b223..79564785ae30 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -555,4 +555,6 @@ config CRYPTO_DEV_ROCKCHIP
source "drivers/crypto/chelsio/Kconfig"
+source "drivers/crypto/virtio/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250fa1348..bc53cb833a06 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39139e9..d10b4ae5e0da 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_out_dma_addr, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device,
- ctx->sa_len * 4,
+ dma_free_coherent(ctx->dev->core_dev->device, size * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
return -ENOMEM;
}
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951bb70b1..0ec04407b533 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
#define AES_MR_OPMOD_CFB (0x3 << 12)
#define AES_MR_OPMOD_CTR (0x4 << 12)
#define AES_MR_OPMOD_GCM (0x5 << 12)
+#define AES_MR_OPMOD_XTS (0x6 << 12)
#define AES_MR_LOD (0x1 << 15)
#define AES_MR_CFBS_MASK (0x7 << 16)
#define AES_MR_CFBS_128b (0x0 << 16)
@@ -67,6 +68,9 @@
#define AES_CTRR 0x98
#define AES_GCMHR(x) (0x9c + ((x) * 0x04))
+#define AES_TWR(x) (0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x) (0xd0 + ((x) * 0x04))
+
#define AES_HW_VERSION 0xFC
#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..0e3d0d655b96 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
AES_FLAGS_ENCRYPT | \
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
+ bool has_xts;
u32 max_burst_size;
};
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
atmel_aes_fn_t ghash_resume;
};
+struct atmel_aes_xts_ctx {
+ struct atmel_aes_base_ctx base;
+
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
struct atmel_aes_reqctx {
unsigned long mode;
};
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
break;
+ case AES_TWR(0):
+ case AES_TWR(1):
+ case AES_TWR(2):
+ case AES_TWR(3):
+ snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+ break;
+
+ case AES_ALPHAR(0):
+ case AES_ALPHAR(1):
+ case AES_ALPHAR(2):
+ case AES_ALPHAR(3):
+ snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+ break;
+
default:
snprintf(tmp, sz, "0x%02x", offset);
break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
char tmp[16];
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
- atmel_aes_reg_name(offset, tmp));
+ atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
}
#endif /* VERBOSE_DEBUG */
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
return err;
}
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
- const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv, const u32 *key, int keylen)
{
u32 valmr = 0;
/* MR register must be set before IV registers */
- if (dd->ctx->keylen == AES_KEYSIZE_128)
+ if (keylen == AES_KEYSIZE_128)
valmr |= AES_MR_KEYSIZE_128;
- else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ else if (keylen == AES_KEYSIZE_192)
valmr |= AES_MR_KEYSIZE_192;
else
valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
atmel_aes_write(dd, AES_MR, valmr);
- atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
- SIZE_IN_WORDS(dd->ctx->keylen));
+ atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
atmel_aes_write_block(dd, AES_IVR(0), iv);
}
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+ const u32 *iv)
+
+{
+ atmel_aes_write_ctrl_key(dd, use_dma, iv,
+ dd->ctx->key, dd->ctx->keylen);
+}
/* CPU transfer */
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
};
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err;
+
+ atmel_aes_set_mode(dd, rctx);
+
+ err = atmel_aes_hw_init(dd);
+ if (err)
+ return atmel_aes_complete(dd, err);
+
+ /* Compute the tweak value from req->info with ecb(aes). */
+ flags = dd->flags;
+ dd->flags &= ~AES_FLAGS_MODE_MASK;
+ dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+ atmel_aes_write_ctrl_key(dd, false, NULL,
+ ctx->key2, ctx->base.keylen);
+ dd->flags = flags;
+
+ atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+ return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+ bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+ u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+ static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+ u8 *tweak_bytes = (u8 *)tweak;
+ int i;
+
+ /* Read the computed ciphered tweak value. */
+ atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+ /*
+ * Hardware quirk:
+ * the order of the ciphered tweak bytes needs to be reversed before
+ * writing them into the TWRx registers.
+ */
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+ u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+ tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+ tweak_bytes[i] = tmp;
+ }
+
+ /* Process the data. */
+ atmel_aes_write_ctrl(dd, use_dma, NULL);
+ atmel_aes_write_block(dd, AES_TWR(0), tweak);
+ atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+ if (use_dma)
+ return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+
+ return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+ atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int err;
+
+ err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+ if (err)
+ return err;
+
+ memcpy(ctx->base.key, key, keylen/2);
+ memcpy(ctx->key2, key + keylen/2, keylen/2);
+ ctx->base.keylen = keylen/2;
+
+ return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+ ctx->base.start = atmel_aes_xts_start;
+
+ return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "atmel-xts-aes",
+ .cra_priority = ATMEL_AES_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_xts_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_xts_setkey,
+ .encrypt = atmel_aes_xts_encrypt,
+ .decrypt = atmel_aes_xts_decrypt,
+ }
+};
+
+
/* Probe functions */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
+ if (dd->caps.has_xts)
+ crypto_unregister_alg(&aes_xts_alg);
+
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_gcm_alg;
}
+ if (dd->caps.has_xts) {
+ err = crypto_register_alg(&aes_xts_alg);
+ if (err)
+ goto err_aes_xts_alg;
+ }
+
return 0;
+err_aes_xts_alg:
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
+ dd->caps.has_xts = 0;
dd->caps.max_burst_size = 1;
/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
+ dd->caps.has_xts = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:
static int atmel_aes_remove(struct platform_device *pdev)
{
- static struct atmel_aes_dev *aes_dd;
+ struct atmel_aes_dev *aes_dd;
aes_dd = platform_get_drvdata(pdev);
if (!aes_dd)
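The XTS path above first encrypts req->info with ecb(aes) under key2, then byte-reverses the result before loading it into the TWR registers. Below is a small stand-alone sketch of just that byte reversal, for reference; reverse16() and the sample main() are hypothetical illustrations that mirror the swap loop in atmel_aes_xts_process_data(), with no hardware or crypto API involved.

/* Plain C model of the tweak byte-order quirk handled above. */
#include <stdint.h>
#include <stdio.h>

#define XTS_BLOCK_SIZE 16

static void reverse16(uint8_t *b)
{
	for (int i = 0; i < XTS_BLOCK_SIZE / 2; i++) {
		uint8_t tmp = b[XTS_BLOCK_SIZE - 1 - i];

		b[XTS_BLOCK_SIZE - 1 - i] = b[i];
		b[i] = tmp;
	}
}

int main(void)
{
	uint8_t tweak[XTS_BLOCK_SIZE];

	for (int i = 0; i < XTS_BLOCK_SIZE; i++)
		tweak[i] = (uint8_t)i;	/* pretend this was read from ODATAR */

	reverse16(tweak);

	for (int i = 0; i < XTS_BLOCK_SIZE; i++)
		printf("%02x", tweak[i]);	/* prints 0f0e0d...0100 */
	printf("\n");
	return 0;
}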
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf3024b680..bc0d3569f8d9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
help
Selecting this will enable printing of various debug
information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf5515ae8a..6554742f357e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 954a64c7757b..662fe94cb2f8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -117,8 +86,7 @@
static void dbg_dump_sg(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
+ struct scatterlist *sg, size_t tlen, bool ascii)
{
struct scatterlist *it;
void *it_page;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,52 +130,6 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
@@ -220,147 +141,36 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+ is_rfc3686, nonce, ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
+ ctx->adata.key_dma = ctx->key_dma;
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ skip_enc:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,93 +330,30 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ copy_iv:
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
skip_givenc:
return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
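
The inline-key check that replaces the old keys_fit_inline flag is identical across these set_sh_desc helpers: subtract the job-descriptor I/O overhead and the key length from the 64-word descriptor buffer and see whether the shared-descriptor body still fits. Below is a minimal stand-alone sketch of that arithmetic; CAAM_CMD_SZ is 4 bytes per descriptor word in the driver, but the job I/O and descriptor lengths passed in the example are illustrative placeholders, not the real GCM_DESC_JOB_IO_LEN / DESC_RFC4106_ENC_LEN values.

	#include <stdbool.h>
	#include <stdio.h>

	#define CAAM_CMD_SZ		4			/* bytes per descriptor word */
	#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* 64-word h/w buffer */

	/* true if the key can be embedded in the shared descriptor itself */
	static bool key_fits_inline(int job_io_len, int shdesc_len, int keylen)
	{
		int rem_bytes = CAAM_DESC_BYTES_MAX - job_io_len - keylen;

		return rem_bytes >= shdesc_len;
	}

	int main(void)
	{
		/* e.g. a 32-byte key, 6 words of job I/O, a 40-word descriptor */
		printf("key inline: %d\n",
		       key_fits_inline(6 * CAAM_CMD_SZ, 40 * CAAM_CMD_SZ, 32));
		return 0;
	}

When the key fits, key_virt/key_inline are recorded so the constructor emits the key as immediate data; otherwise key_dma is recorded and the constructor references the key by bus address.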
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
return 0;
}
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
- ctx->enckeylen = keys.enckeylen;
+ ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, DMA_TO_DEVICE);
}
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
ret = rfc4106_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
+ ctx->cdata.keylen = keylen - 4;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
ret = rfc4543_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
DMA_TO_DEVICE);
}
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
- return ret;
+ return 0;
}
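
The setkey paths above no longer keep enckeylen, class1_alg_type and alg_op directly in the context; they fill cipher (cdata) and authentication (adata) transform descriptions that the cnstr_shdsc_* helpers consume. The sketch below is a hedged reconstruction of that bookkeeping structure, inferred only from the fields this patch uses; the driver's real struct alginfo lives with the descriptor-construction headers and may carry additional members.

	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned int u32;
	typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

	/* reconstructed from the fields used in this patch (illustrative only) */
	struct alginfo {
		u32 algtype;		/* OP_TYPE_* | OP_ALG_ALGSEL_* | OP_ALG_AAI_* */
		unsigned int keylen;	/* key length actually programmed */
		unsigned int keylen_pad;/* padded split-key length (auth side) */
		dma_addr_t key_dma;	/* bus address, used when not inlined */
		const void *key_virt;	/* virtual address, used when inlined */
		bool key_inline;	/* embed the key in the shared descriptor? */
	};

	int main(void)
	{
		static unsigned char key_buf[32];
		struct alginfo cdata = { 0 };

		/* what ablkcipher_setkey() records before calling the helpers */
		cdata.keylen = sizeof(key_buf);
		cdata.key_virt = key_buf;
		cdata.key_inline = true;

		printf("inline=%d keylen=%u\n", cdata.key_inline, cdata.keylen);
		return 0;
	}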
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
- ctx->enckeylen = keylen;
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->src_nents > 1 ? 100 : ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
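
The completion callbacks now recover the extended descriptor with container_of() instead of open-coded offsetof() pointer arithmetic; the two forms are equivalent because hw_desc[0] is exactly the descriptor pointer the job ring hands back. A small stand-alone illustration, with the struct trimmed to the members the demo needs:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ablkcipher_edesc {
		int src_nents;
		int dst_nents;
		unsigned int hw_desc[16];	/* job descriptor lives at the tail */
	};

	int main(void)
	{
		struct ablkcipher_edesc edesc = { .src_nents = 1, .dst_nents = 2 };
		unsigned int *desc = &edesc.hw_desc[0];	/* what the callback gets */
		struct ablkcipher_edesc *back =
			container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

		printf("src_nents=%d dst_nents=%d\n",
		       back->src_nents, back->dst_nents);
		return 0;
	}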
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/* Check if data are contiguous. */
all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
+ if (!all_contig)
sec4_sg_len = src_nents;
- }
sec4_sg_len += dst_nents;
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
+ req->assoclen + req->cryptlen, 1);
#endif
/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (likely(req->src == req->dst)) {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
if (ctx->key_dma &&
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 000000000000..f3f48c10b9d6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
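
Several of these constructors, append_dec_op1() included, rely on reserving a jump word and patching it once the jump target is known (the append_jump() / set_jump_tgt_here() pair). The following user-space model shows only that forward-patching idea; the command encodings and offset layout are invented for illustration and do not match the real CAAM descriptor format.

	#include <stdio.h>

	#define CMD_JUMP	0xa0000000u	/* made-up encodings */
	#define CMD_OP		0x80000000u

	static unsigned int desc[64];
	static int desc_len;

	static unsigned int *append_cmd32(unsigned int cmd)
	{
		desc[desc_len] = cmd;
		return &desc[desc_len++];	/* hand back the slot for patching */
	}

	static void set_jump_target_here(unsigned int *jump)
	{
		/* patch in the distance, in words, from the jump to "here" */
		*jump |= (unsigned int)(&desc[desc_len] - jump);
	}

	int main(void)
	{
		unsigned int *skip = append_cmd32(CMD_JUMP);	/* placeholder   */
		append_cmd32(CMD_OP | 1);			/* skipped words */
		append_cmd32(CMD_OP | 2);
		set_jump_target_here(skip);			/* resolve jump  */
		printf("jump word: 0x%08x\n", desc[0]);
		return 0;
	}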
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
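The RFC3686 comment above describes the key blob this helper expects: the 4-byte nonce rides at the end of the cipher key, and cdata->keylen still counts it (see the "Nonce is counted in cdata.keylen" note in the new header). A hedged sketch of how a caller might derive the nonce pointer it passes in; the helper name is hypothetical.

/* Illustrative only -- not part of the patch. */
static void *example_rfc3686_nonce(const struct alginfo *cdata)
{
	/* last CTR_RFC3686_NONCE_SIZE bytes of the cipher key blob */
	return (u8 *)cdata->key_virt + cdata->keylen -
	       CTR_RFC3686_NONCE_SIZE;
}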
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
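A hedged usage sketch for the constructor above, e.g. for authenc(hmac(sha1),cbc(aes)) with both keys living in one DMA-mapped buffer (auth split key first, then the AES key). The context layout and field names are assumptions made for illustration.

/* Illustrative only -- not part of the patch. */
static void example_build_authenc_enc_shdesc(struct example_aead_ctx *ctx,
					     unsigned int authsize)
{
	struct alginfo cdata = {
		.algtype    = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			      OP_ALG_AAI_CBC,
		.keylen     = ctx->enckeylen,
		.key_virt   = ctx->key + ctx->adata.keylen_pad,
		.key_dma    = ctx->key_dma + ctx->adata.keylen_pad,
		.key_inline = true,
	};

	/* plain CBC: no rfc3686 wrapping, no nonce, IV at CONTEXT1 offset 0 */
	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &cdata, &ctx->adata,
			       authsize, false, NULL, 0);
}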
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_rfc3686)
+ goto copy_iv;
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV from class 1 context to OFIFO */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload the IV */
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump past the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
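Whether cdata->key_inline can be set depends on the room left in a shared descriptor once the commands above are accounted for. A hedged sketch of that decision, using the DESC_GCM_ENC_LEN macro added by this patch; the context fields and the omission of job-descriptor I/O overhead are simplifying assumptions.

/* Illustrative only -- not part of the patch. */
static void example_pick_gcm_key_location(struct example_gcm_ctx *ctx,
					  unsigned int authsize)
{
	unsigned int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_GCM_ENC_LEN;

	if (rem_bytes >= ctx->cdata.keylen) {
		ctx->cdata.key_inline = true;	/* embed the key in the desc */
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;	/* reference it via DMA */
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, authsize);
}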
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store decrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
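Both rfc4106 descriptors subtract 8 from the assoc length and then skip 8 input bytes because, by the rfc4106 convention used here, the 8-byte IV is carried as the tail of the assoc data. A small hedged illustration of that bookkeeping:

/* Illustrative only -- not part of the patch. */
static unsigned int example_rfc4106_aad_len(unsigned int assoclen)
{
	/* e.g. assoclen = 20 -> 12 bytes are hashed as AAD, 8 are the IV */
	return assoclen - 8;
}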
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
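A hedged sketch of driving the constructor above for plain cbc(aes): the IV sits at offset 0 of CONTEXT1 and no counter or nonce handling is needed. The context structure and its fields are assumptions for illustration; the key is always passed inline by this constructor, so only key_virt is needed.

/* Illustrative only -- not part of the patch. */
static void example_build_cbc_aes_enc_shdesc(struct example_blkcipher_ctx *ctx)
{
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			     OP_ALG_AAI_CBC;
	ctx->cdata.keylen = ctx->enckeylen;
	ctx->cdata.key_virt = ctx->key;

	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata,
				     AES_BLOCK_SIZE, false /* !rfc3686 */,
				     0 /* ctx1_iv_off */);
}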
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
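Both XTS descriptors consume the request IV the same way: the upper 8 bytes are loaded into CONTEXT1 at offset 0x20 as the sector index and the lower 8 bytes are skipped. A hedged sketch of how a caller might lay out that IV; the byte order shown is an assumption.

/* Illustrative only -- not part of the patch. */
static void example_xts_fill_iv(u8 iv[16], __be64 sector_index)
{
	memcpy(iv, &sector_index, 8);	/* upper 8 bytes: sector index */
	memset(iv + 8, 0, 8);		/* lower 8 bytes: discarded */
}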
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 000000000000..95551737333a
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int icvsize,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc206969f..e58639ea53b1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true),
+ * import the context, then read and write to seqout.
+ * For ahash first and digest (import_ctx = false),
+ * read and write to seqout.
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
ret = ahash_set_sh_desc(ahash);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
+
error_free_key:
kfree(hashed_key);
return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
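This hunk, and the similar ones below, replace open-coded offsetof() arithmetic with container_of(). A hedged sketch of the equivalence, with a hypothetical helper name:

/* Illustrative only -- both expressions recover the enclosing edesc. */
static inline struct ahash_edesc *example_edesc_from_desc(u32 *desc)
{
	return container_of(desc, struct ahash_edesc, hw_desc[0]);
	/* same pointer as:
	 * (struct ahash_edesc *)((char *)desc -
	 *			  offsetof(struct ahash_edesc, hw_desc));
	 * but with a compile-time check that desc matches hw_desc[0]'s type
	 */
}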
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
desc_bytes(ctx->sh_desc_digest),
DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e652b8..32100c4851dd 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2c7241..41398da3edf4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78c6343..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
ring);
continue;
}
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
&caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -833,7 +831,8 @@ caam_remove:
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b6646bb36..2e6766a1573f 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
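A minimal standalone sketch (not part of the patch) of the pre-shifted mask convention the desc.h hunk above moves to for HDR_START_IDX_MASK, HDR_SD_SHARE_MASK and HDR_JD_SHARE_MASK: a field is now extracted with a single AND plus shift and composed with a shift plus AND. The two constants are copied from the hunk; the helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define HDR_START_IDX_SHIFT	16
#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)

/* Hypothetical helpers showing how a pre-shifted field mask is used. */
static inline uint32_t hdr_get_start_idx(uint32_t hdr)
{
	return (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;
}

static inline uint32_t hdr_set_start_idx(uint32_t hdr, uint32_t idx)
{
	hdr &= ~HDR_START_IDX_MASK;
	return hdr | ((idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
}

int main(void)
{
	uint32_t hdr = hdr_set_start_idx(0, 5);

	printf("start index = %u\n", hdr_get_start_idx(hdr)); /* prints 5 */
	return 0;
}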
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a78ec1f..b9c8d98ef826 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ * is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or the corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define stating the
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
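A self-contained usage sketch for the desc_inline_query() helper added above. The loop body is copied from the hunk; the descriptor-size constants and the two data lengths are illustrative stand-ins for CAAM_DESC_BYTES_MAX / CAAM_PTR_SZ and real key lengths, not values quoted from the driver.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver's CAAM_DESC_BYTES_MAX and CAAM_PTR_SZ */
#define DESC_BYTES_MAX	256
#define PTR_SZ		8

/* Same logic as the desc_inline_query() added in this hunk */
static int desc_inline_query(unsigned int sd_base_len, unsigned int jd_len,
			     unsigned int *data_len, uint32_t *inl_mask,
			     unsigned int count)
{
	int rem_bytes = (int)(DESC_BYTES_MAX - sd_base_len - jd_len);
	unsigned int i;

	*inl_mask = 0;
	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
		if (rem_bytes - (int)(data_len[i] +
				      (count - i - 1) * PTR_SZ) >= 0) {
			/* item fits inline, consume its full length */
			rem_bytes -= data_len[i];
			*inl_mask |= (1 << i);
		} else {
			/* item must be referenced, consume a pointer instead */
			rem_bytes -= PTR_SZ;
		}
	}

	return (rem_bytes >= 0) ? 0 : -1;
}

int main(void)
{
	/* e.g. an authentication split key and a cipher key */
	unsigned int data_len[2] = { 64, 32 };
	uint32_t inl_mask;

	if (!desc_inline_query(48, 64, data_len, &inl_mask, 2))
		printf("key0 %s, key1 %s\n",
		       (inl_mask & 1) ? "inlined" : "referenced",
		       (inl_mask & 2) ? "inlined" : "referenced");
	return 0;
}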
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea83fcc..79a0cc70717f 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c05074a5c..e2bcacc1a921 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f9953d..c8604dfadbf5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * the tasklet if jobs are done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..1bb2816a9b4d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..4628f389eb64 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
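A small standalone sketch of the MDHA split key sizing that gen_split_key() now derives internally via the split_key_len()/split_key_pad_len() helpers added to key_gen.c above. For brevity it indexes the pad-length table directly rather than deriving the index from OP_ALG_ALGSEL_* as the driver does; the table values and the align-to-16 padding are taken from the hunk.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Sizes for MDHA pads (*not* keys): MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
static const unsigned char mdpadlen[] = { 16, 20, 32, 32, 64, 64 };

int main(void)
{
	unsigned int idx = 3;				/* SHA256 entry */
	unsigned int keylen = mdpadlen[idx] * 2;	/* ipad + opad halves */
	unsigned int keylen_pad = ALIGN_UP(keylen, 16);

	printf("SHA256 split key: %u bytes, padded to %u\n",
	       keylen, keylen_pad);			/* 64, 64 */
	return 0;
}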
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a356d05..6afa20c4a013 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
#include "regs.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..7bc09989e18a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->sb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..e2ce8190ecc9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
#include "ccp-dev.h"
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
bitmap_set(ccp->lsbmap, start, count);
mutex_unlock(&ccp->sb_mutex);
- return start * LSB_ITEM_SIZE;
+ return start;
}
ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
}
}
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
unsigned int count)
{
- int lsbno = start / LSB_SIZE;
-
if (!start)
return;
- if (cmd_q->lsb == lsbno) {
+ if (cmd_q->lsb == start) {
/* An entry from the private LSB */
- bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ bitmap_clear(cmd_q->lsbmap, start, count);
} else {
/* From the shared LSBs */
struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Key (Exponent) is in external memory */
- CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
- CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ /* Exponent is in LSB memory */
+ CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
goto e_pool;
}
- /* Initialize the queue used to suspend */
- init_waitqueue_head(&ccp->suspend_queue);
-
dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */
status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
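A toy user-space model (not driver code) of the slot-number convention the two hunks above switch to: ccp_lsb_alloc() now returns an LSB slot index, callers needing a bus offset scale it themselves (as ccp5_perform_rsa() does with op->sb_key * LSB_ITEM_SIZE), and ccp_lsb_free() takes the same slot index back. The driver's version additionally handles the private/public LSB split and blocks when no space is free; the fixed map size here is illustrative.

#include <stdio.h>
#include <stdbool.h>

#define LSB_SLOTS 16

static bool slot_map[LSB_SLOTS];	/* false = free, true = in use */

/* Find 'count' contiguous free slots, mark them used and return the first
 * slot index, or -1 if nothing fits (the real allocator would wait).
 */
static int lsb_alloc(unsigned int count)
{
	unsigned int start, i;

	for (start = 0; start + count <= LSB_SLOTS; start++) {
		for (i = 0; i < count; i++)
			if (slot_map[start + i])
				break;
		if (i == count) {
			for (i = 0; i < count; i++)
				slot_map[start + i] = true;
			return (int)start;
		}
	}
	return -1;
}

static void lsb_free(unsigned int start, unsigned int count)
{
	while (count--)
		slot_map[start + count] = false;
}

int main(void)
{
	int a = lsb_alloc(4);			/* 0 */
	int b = lsb_alloc(2);			/* 4 */

	printf("a=%d b=%d\n", a, b);
	lsb_free(a, 4);
	printf("reuse=%d\n", lsb_alloc(3));	/* 0 again */
	return 0;
}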
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633aae10..511ab042b5e7 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->sb_count = KSB_COUNT;
ccp->sb_start = 0;
+ /* Initialize the wait queues */
+ init_waitqueue_head(&ccp->sb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..830f35e6005f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
/* Private LSB that is assigned to this queue, or -1 if none.
* Bitmap for my private LSB, unused otherwise
*/
- unsigned int lsb;
+ int lsb;
DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
- struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
* word 7: upper 16 bits of key pointer; key memory type
*/
struct dword0 {
- __le32 soc:1;
- __le32 ioc:1;
- __le32 rsvd1:1;
- __le32 init:1;
- __le32 eom:1; /* AES/SHA only */
- __le32 function:15;
- __le32 engine:4;
- __le32 prot:1;
- __le32 rsvd2:7;
+ unsigned int soc:1;
+ unsigned int ioc:1;
+ unsigned int rsvd1:1;
+ unsigned int init:1;
+ unsigned int eom:1; /* AES/SHA only */
+ unsigned int function:15;
+ unsigned int engine:4;
+ unsigned int prot:1;
+ unsigned int rsvd2:7;
};
struct dword3 {
- __le32 src_hi:16;
- __le32 src_mem:2;
- __le32 lsb_cxt_id:8;
- __le32 rsvd1:5;
- __le32 fixed:1;
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
};
union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
union dword5 {
struct {
- __le32 dst_hi:16;
- __le32 dst_mem:2;
- __le32 rsvd1:13;
- __le32 fixed:1;
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
} fields;
__le32 sha_len_hi;
};
struct dword7 {
- __le32 key_hi:16;
- __le32 key_mem:2;
- __le32 rsvd1:14;
+ unsigned int key_hi:16;
+ unsigned int key_mem:2;
+ unsigned int rsvd1:14;
};
struct ccp5_desc {
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb9a880..3e104f5aa0c2 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 56b153805462..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -612,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -622,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -639,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -649,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -729,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -781,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -854,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -917,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -931,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -963,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -979,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1023,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1044,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1057,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1075,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1104,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1124,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
+
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
static int chcr_sha_init(struct ahash_request *areq)
@@ -1155,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1204,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+	 * Input order is AAD, IV and Payload, where the IV should be included
+	 * as part of the authdata. All other fields should be filled according
+	 * to the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
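For reference, the flags byte that generate_b0() ORs into reqctx->iv[0] follows the RFC 3610 B0 layout (bit 6 = Adata, bits 3..5 = (M - 2) / 2, bits 0..2 = L - 1). A minimal user-space sketch of that computation; ccm_b0_flags() is a hypothetical helper used for illustration, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the CCM B0 flags byte per RFC 3610; assumes the first IV byte
 * already carries L' = L - 1, as the driver keeps it in reqctx->iv[0].
 */
static uint8_t ccm_b0_flags(unsigned int authsize, unsigned int assoclen,
			    uint8_t l_prime)
{
	uint8_t flags = l_prime & 0x7;			/* L - 1, bits 0..2 */

	flags |= (uint8_t)(8 * ((authsize - 2) / 2));	/* M field, bits 3..5 */
	if (assoclen)
		flags |= 64;				/* Adata bit */
	return flags;
}

int main(void)
{
	/* 16-byte tag, AAD present, L = 4 (l_prime = 3) -> prints 0x7b */
	printf("0x%02x\n", ccm_b0_flags(16, 20, 3));
	return 0;
}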
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
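The nonce block that ccm_format_packet() assembles for rfc4309(ccm(aes)) can be summarized outside the driver as below; build_rfc4309_iv() is a hypothetical helper for illustration only:

#include <stdint.h>
#include <string.h>

/* 16-byte CCM counter block for RFC 4309: fixed L' = 3, the 3-byte salt
 * stored at setkey time, the 8-byte per-request IV, and a zeroed counter.
 */
static void build_rfc4309_iv(uint8_t out[16], const uint8_t salt[3],
			     const uint8_t req_iv[8])
{
	out[0] = 3;			/* L' = L - 1, so 4 length octets */
	memcpy(out + 1, salt, 3);	/* implicit salt from the key */
	memcpy(out + 4, req_iv, 8);	/* explicit per-request IV */
	memset(out + 12, 0, 4);		/* counter field starts at zero */
}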
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+	/* For CCM there will be b0 always. So AAD start will be 1 always */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	/* b0 and aad length (if available) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+		/* a null payload is not supported by the hardware, so the
+		 * software sends one block-sized payload instead
+ */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ /* prepare a 16 byte iv */
+ /* S A L T | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
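The 16-byte IV that create_gcm_wr() prepares ("S A L T | IV | 0x00000001" for rfc4106, the 12-byte request IV followed by 0x00000001 otherwise) can be sketched in isolation; build_gcm_iv() is a hypothetical helper, not driver code:

#include <stdint.h>
#include <string.h>

/* GCM J0 layout handed to the hardware: 4-byte salt + 8-byte IV for
 * rfc4106(gcm(aes)), or the plain 12-byte request IV for gcm(aes),
 * always followed by a big-endian 32-bit counter initialized to 1.
 */
static void build_gcm_iv(uint8_t out[16],
			 const uint8_t *salt /* 4 bytes, or NULL for gcm */,
			 const uint8_t *req_iv)
{
	if (salt) {
		memcpy(out, salt, 4);
		memcpy(out + 4, req_iv, 8);
	} else {
		memcpy(out, req_iv, 12);
	}
	out[12] = 0;
	out[13] = 0;
	out[14] = 0;
	out[15] = 1;	/* counter = 0x00000001, big endian */
}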
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
+	 * not hold for SHA1, so the authsize == 12 condition should come before
+ * authsize == (maxauth >> 1)
+ */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+
+ crypto_tfm_set_flags((struct crypto_tfm *) tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ pr_err("GCM: Invalid key length %d", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous AES
+	 * blkcipher; it goes into the key context
+ */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* contains both the auth and cipher keys */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+	/* Copy only the encryption key. The authkey is used to generate h(ipad)
+	 * and h(opad), so it is not needed again; authkeylen is the size of
+	 * the hash digest.
+ */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
}
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+ /* Compute the ipad-digest*/
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+ /* Compute the opad-digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
}
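The ipad/opad preparation done inside chcr_authenc_setkey() before the partial hashes are computed is the standard HMAC key padding: zero-extend the (possibly pre-hashed) auth key to the hash block size, then XOR with the 0x36 or 0x5c constant, which the driver does word-wise using IPAD_DATA/OPAD_DATA. A minimal byte-wise sketch, with hmac_pad_block() as an illustrative helper only:

#include <stdint.h>
#include <string.h>

/* Zero-pad the auth key to the hash block size and XOR every byte with
 * the ipad (0x36) or opad (0x5c) constant.
 */
static void hmac_pad_block(uint8_t *pad, const uint8_t *key,
			   unsigned int keylen, unsigned int bs,
			   uint8_t xor_byte)
{
	unsigned int i;

	memcpy(pad, key, keylen);
	memset(pad + keylen, 0, bs - keylen);
	for (i = 0; i < bs; i++)
		pad[i] ^= xor_byte;
}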
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct crypto_authenc_keys keys;
+
+	/* contains both the auth and cipher keys */
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+
+ if (ctx && !ctx->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+ if (IS_ERR(skb) || skb == NULL) {
+ pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+ return PTR_ERR(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1234,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1261,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1354,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1366,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1378,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1390,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1402,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
/*
@@ -1424,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1458,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 199b0bb69b89..3c7c51f7bedf 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c
-#define TRANSHDR_SIZE(alignedkctx_len)\
- (sizeof(struct ulptx_idata) +\
- sizeof(struct ulp_txpkt) +\
- sizeof(struct fw_crypto_lookaside_wr) +\
- sizeof(struct cpl_tx_sec_pdu) +\
- (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
- (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
- (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
- sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata))
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
htonl( \
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
CPL_TX_SEC_PDU_RXCHID_V((id)) | \
CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
- CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
-#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
htonl( \
SCMD_SEQ_NO_CTRL_V(0) | \
SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
SCMD_AUTH_MODE_V((amode)) | \
SCMD_HMAC_CTRL_V((opad)) | \
SCMD_IV_SIZE_V((size)) | \
- SCMD_NUM_IVS_V((nivs)))
+ SCMD_NUM_IVS_V(0))
#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
* where they indicate the size of the integrity check value (ICV)
*/
enum {
- AES_CCM_ICV_4 = 4,
- AES_CCM_ICV_6 = 6,
- AES_CCM_ICV_8 = 8,
- AES_CCM_ICV_10 = 10,
- AES_CCM_ICV_12 = 12,
- AES_CCM_ICV_14 = 14,
- AES_CCM_ICV_16 = 16
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
};
struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
187, 22
};
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
{
u8 bytes[4];
@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
0x1B000000, 0x36000000, 0x6C000000
};
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
- const unsigned char *key,
- unsigned int keylength)
-{
- u32 temp;
- u32 w_ring[MAX_NK];
- int i, j, k;
- u8 nr, nk;
-
- switch (keylength) {
- case AES_KEYLENGTH_128BIT:
- nk = KEYLENGTH_4BYTES;
- nr = NUMBER_OF_ROUNDS_10;
- break;
-
- case AES_KEYLENGTH_192BIT:
- nk = KEYLENGTH_6BYTES;
- nr = NUMBER_OF_ROUNDS_12;
- break;
- case AES_KEYLENGTH_256BIT:
- nk = KEYLENGTH_8BYTES;
- nr = NUMBER_OF_ROUNDS_14;
- break;
- default:
- return;
- }
- for (i = 0; i < nk; i++ )
- w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
- i = 0;
- temp = w_ring[nk - 1];
- while(i + nk < (nr + 1) * 4) {
- if(!(i % nk)) {
- /* RotWord(temp) */
- temp = (temp << 8) | (temp >> 24);
- temp = aes_ks_subword(temp);
- temp ^= round_constant[i / nk];
- }
- else if (nk == 8 && (i % 4 == 0))
- temp = aes_ks_subword(temp);
- w_ring[i % nk] ^= temp;
- temp = w_ring[i % nk];
- i++;
- }
- i--;
- for (k = 0, j = i % nk; k < nk; k++) {
- *((u32 *)dec_key + k) = htonl(w_ring[j]);
- j--;
- if(j < 0)
- j += nk;
- }
-}
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 4d7f6700fd7e..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
if (ack_err_status) {
if (CHK_MAC_ERR_BIT(ack_err_status) ||
CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EINVAL;
+ error_status = -EBADMSG;
}
/* call completion callback with failure status */
if (req) {
- if (!chcr_handle_resp(req, input, error_status))
- req->complete(req, error_status);
- else
- return -EINVAL;
+ error_status = chcr_handle_resp(req, input, error_status);
+ req->complete(req, error_status);
} else {
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671a4232..c7088a4e0a49 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
struct uld_ctx;
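+/* Key context that is sent to the hardware as part of the work request */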
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 reserverd;
+ unsigned char key[0];
+};
+
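+/* Complete crypto work request: FW lookaside WR header, ULP TX packet
+ * header, immediate-data header, security CPL and the trailing key context.
+ */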
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
struct chcr_dev {
- /* Request submited to h/w and waiting for response. */
spinlock_t lock_chcr_dev;
- struct crypto_queue pending_queue;
struct uld_ctx *u_ctx;
unsigned char tx_channel_id;
};
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d75605da8b..d5af7d64a763 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+#define T5_MAX_AAD_SIZE 512
+
+
/* Define following if h/w is not dropping the AAD and IV data before
* giving the processed data
*/
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
-#define CHCR_SCMD_CIPHER_MODE_NOP 0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2
+#define AEAD_H_SIZE 16
+
#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
-#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
/* Aligned to 128 bit boundary */
-struct _key_ctx {
- __be32 ctx_hdr;
- u8 salt[MAX_SALT];
- __be64 reserverd;
- unsigned char key[0];
-};
struct ablk_ctx {
- u8 enc;
- unsigned int processed_len;
__be32 key_ctx_hdr;
unsigned int enckey_len;
- unsigned int dst_nents;
- struct scatterlist iv_sg;
u8 key[CHCR_AES_MAX_KEY_LEN];
- u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char ciph_mode;
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ short int dst_nents;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
};
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
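+/* gcm[] and authenc[] are zero-length members overlaying the same trailing
+ * storage; only the variant matching the algorithm in use is touched.
+ */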
+struct __aead_ctx {
+ struct chcr_gcm_ctx gcm[0];
+ struct chcr_authenc_ctx authenc[0];
+};
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_skcipher *null;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[0];
+};
+
struct hmac_ctx {
- struct shash_desc *desc;
+ struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
struct hmac_ctx hmacctx[0];
struct ablk_ctx ablkctx[0];
+ struct chcr_aead_ctx aeadctx[0];
};
struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
struct chcr_ahash_req_ctx {
u32 result;
- char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr_len;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 *reqbfr;
+ u8 *skbfr;
+ u8 reqlen;
/* DMA the partial hash in it */
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
- void *dummy_payload_ptr;
/* SKB which is being sent to the hardware for processing */
struct sk_buff *skb;
};
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
+ unsigned int dst_nents;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
union {
struct crypto_alg crypto;
struct ahash_alg hash;
+ struct aead_alg aead;
} alg;
};
struct chcr_req_ctx {
union {
struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
struct ablkcipher_request *ablk_req;
} req;
union {
struct chcr_ahash_req_ctx *ahash_ctx;
+ struct chcr_aead_reqctx *reqctx;
struct chcr_blkcipher_req_ctx *ablk_ctx;
} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
- struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
+ int size,
unsigned short op_type);
+static int chcr_aead_op(struct aead_request *req_base,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2a4feb..6e7a5c77a00a 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
- dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
- if (!dma->iv_pool)
- return -ENOMEM;
-
cesa->dma = dma;
return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33decd4..b7872f62f674 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,11 +273,12 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
#define CESA_TDMA_END_OF_REQ BIT(29)
#define CESA_TDMA_BREAK_CHAIN BIT(28)
-#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE BIT(27)
+#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
-#define CESA_TDMA_IV 3
+#define CESA_TDMA_RESULT 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +394,6 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
- struct dma_pool *iv_pool;
};
/**
@@ -839,7 +839,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags);
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc9614e6e..098871a22a54 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ ivsize);
} else {
memcpy_fromio(ablkreq->info,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
- ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b375b84..77c0fb936f47 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
sreq->offset = 0;
}
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_req *base = &creq->base;
+
+ /* We must explicitly set the digest state. */
+ if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+ struct mv_cesa_engine *engine = base->engine;
+ int i;
+
+ /* Set the hash state in the IVDIG regs. */
+ for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+ writel_relaxed(creq->state[i], engine->regs +
+ CESA_IVDIG(i));
+ }
+
+ mv_cesa_dma_step(base);
+}
+
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->base);
+ mv_cesa_ahash_dma_step(ahashreq);
else
mv_cesa_ahash_std_step(ahashreq);
}
@@ -311,24 +330,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->last_req) {
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ __le32 *data = NULL;
+
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
+		 * Result is already in the correct endianness when the SA is
+ * used
*/
- if (creq->algo_le) {
- __le32 *result = (void *)ahashreq->result;
+ data = creq->base.chain.last->op->ctx.hash.hash;
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = cpu_to_le32(data[i]);
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_le32(creq->state[i]);
- } else {
- __be32 *result = (void *)ahashreq->result;
+ memcpy(ahashreq->result, data, digsize);
+ } else {
+ for (i = 0; i < digsize / 4; i++)
+ creq->state[i] = readl_relaxed(engine->regs +
+ CESA_IVDIG(i));
+ if (creq->last_req) {
+ /*
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
+ if (creq->algo_le) {
+ __le32 *result = (void *)ahashreq->result;
+
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_le32(creq->state[i]);
+ } else {
+ __be32 *result = (void *)ahashreq->result;
- for (i = 0; i < digsize / 4; i++)
- result[i] = cpu_to_be32(creq->state[i]);
+ for (i = 0; i < digsize / 4; i++)
+ result[i] = cpu_to_be32(creq->state[i]);
+ }
}
}
@@ -503,6 +538,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
CESA_SA_DESC_CFG_LAST_FRAG,
CESA_SA_DESC_CFG_FRAG_MSK);
+ ret = mv_cesa_dma_add_result_op(chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
return op;
}
@@ -562,11 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
+ bool set_state = false;
int ret;
+ u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
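+	/*
+	 * Non-first fragments need the partial digest to be reloaded
+	 * into the IVDIG registers before the chain is started.
+	 */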
+ if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+ set_state = true;
+
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
@@ -634,7 +680,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
goto err_free_tdma;
}
- if (op) {
+ /*
+ * If results are copied via DMA, this means that this
+ * request can be directly processed by the engine,
+ * without partial updates. So we can chain it at the
+ * DMA level with other requests.
+ */
+ type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+ if (op && type != CESA_TDMA_RESULT) {
/* Add dummy desc to wait for crypto operation end */
ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
@@ -647,8 +701,19 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
- basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
- CESA_TDMA_BREAK_CHAIN);
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+ if (type != CESA_TDMA_RESULT)
+ basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
+
+ if (set_state) {
+ /*
+ * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+ * let the step logic know that the IVDIG registers should be
+ * explicitly set before launching a TDMA chain.
+ */
+ basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+ }
return 0;
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5fbaa1b..c76375ff376d 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
- else if (type == CESA_TDMA_IV)
- dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
- le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -112,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
last->next = dreq->chain.first;
engine->chain.last = dreq->chain.last;
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+ /*
+ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain, or if the request
+	 * being queued needs the IV regs to be set before launching
+ * the request.
+ */
+ if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+ !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
last->next_dma = dreq->chain.first->cur_dma;
}
}
@@ -209,29 +213,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
u32 size, u32 flags, gfp_t gfp_flags)
{
-
- struct mv_cesa_tdma_desc *tdma;
- u8 *iv;
- dma_addr_t dma_handle;
+ struct mv_cesa_tdma_desc *tdma, *op_desc;
tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
if (IS_ERR(tdma))
return PTR_ERR(tdma);
- iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
- if (!iv)
- return -ENOMEM;
+ /* We re-use an existing op_desc object to retrieve the context
+ * and result instead of allocating a new one.
+ * There is at least one object of this type in a CESA crypto
+ * req, just pick the first one in the chain.
+ */
+ for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+ u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+ if (type == CESA_TDMA_OP)
+ break;
+ }
+
+ if (!op_desc)
+ return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = src;
- tdma->dst = cpu_to_le32(dma_handle);
- tdma->data = iv;
+ tdma->dst = op_desc->src;
+ tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
- tdma->flags = flags | CESA_TDMA_IV;
+ tdma->flags = flags | CESA_TDMA_RESULT;
return 0;
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce9400a..451fa18c1c7b 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
kthread_stop(cp->queue_th);
err:
- kfree(cp);
cpg = NULL;
return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
clk_put(cp->clk);
}
- kfree(cp);
cpg = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f229f7f7..036057abb257 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956ee0ce..1d9ecd368b5b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
if (status & SAHARA_STATUS_MODE_BATCH)
dev_dbg(dev->device, " - Batch Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEDICATED)
- dev_dbg(dev->device, " - Decidated Mode.\n");
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
else if (status & SAHARA_STATUS_MODE_DEBUG)
dev_dbg(dev->device, " - Debug Mode.\n");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f41dc0..0bba6a19d36a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
- dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+ dev_err(dev, is_sec1 ? "pointer not complete error\n"
: "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 000000000000..d80f73366ae2
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_VIRTIO
+ tristate "VirtIO crypto driver"
+ depends on VIRTIO
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ default m
+ help
+	  This driver provides support for the virtio crypto device. If you
+ choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 000000000000..dd342c947ff9
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+ virtio_crypto_algs.o \
+ virtio_crypto_mgr.o \
+ virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 000000000000..c2374df9abae
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,540 @@
+ /* Algorithms supported by virtio crypto device
+ *
+ * Authors: Gonglei <arei.gonglei@huawei.com>
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+/*
+ * The algs_lock protects the global virtio_crypto_active_devs counter
+ * below and the crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int virtio_crypto_active_devs;
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+ u64 total = 0;
+
+ for (total = 0; sg; sg = sg_next(sg))
+ total += sg->length;
+
+ return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ pr_err("virtio_crypto: Unsupported key length: %d\n",
+ key_len);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ uint32_t alg, const uint8_t *key,
+ unsigned int keylen,
+ int encrypt)
+{
+ struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+ unsigned int tmp;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ /*
+	 * Avoid doing DMA from the stack; use a dynamically
+	 * allocated buffer for the key instead.
+ */
+ uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+
+ if (!cipher_key)
+ return -ENOMEM;
+
+ memcpy(cipher_key, key, keylen);
+
+ spin_lock(&vcrypto->ctrl_lock);
+ /* Pad ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+ /* Set the default dataqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ /* Pad cipher's parameters */
+ vcrypto->ctrl.u.sym_create_session.op_type =
+ cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+ vcrypto->ctrl.header.algo;
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+ cpu_to_le32(keylen);
+ vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+ cpu_to_le32(op);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Set key */
+ sg_init_one(&key_sg, cipher_key, keylen);
+ sgs[num_out++] = &key_sg;
+
+ /* Return status and session id back */
+ sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+ sgs[num_out + num_in++] = &inhdr;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ kzfree(cipher_key);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ /*
+	 * The kick traps into the hypervisor, so the request should
+	 * be handled immediately.
+ */
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(vcrypto->input.status));
+ kzfree(cipher_key);
+ return -EINVAL;
+ }
+
+ if (encrypt)
+ ctx->enc_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+ else
+ ctx->dec_sess_info.session_id =
+ le64_to_cpu(vcrypto->input.session_id);
+
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ kzfree(cipher_key);
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_close_session(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ int encrypt)
+{
+ struct scatterlist outhdr, status_sg, *sgs[2];
+ unsigned int tmp;
+ struct virtio_crypto_destroy_session_req *destroy_session;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+
+ spin_lock(&vcrypto->ctrl_lock);
+ vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+ /* Pad ctrl header */
+ vcrypto->ctrl.header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ /* Set the default virtqueue id to 0 */
+ vcrypto->ctrl.header.queue_id = 0;
+
+ destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+ if (encrypt)
+ destroy_session->session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ else
+ destroy_session->session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+
+ sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Return status and session id back */
+ sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+ sizeof(vcrypto->ctrl_status.status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+ num_in, vcrypto, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ return err;
+ }
+ virtqueue_kick(vcrypto->ctrl_vq);
+
+ while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+ !virtqueue_is_broken(vcrypto->ctrl_vq))
+ cpu_relax();
+
+ if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+ spin_unlock(&vcrypto->ctrl_lock);
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ vcrypto->ctrl_status.status,
+ destroy_session->session_id);
+
+ return -EINVAL;
+ }
+ spin_unlock(&vcrypto->ctrl_lock);
+
+ return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_sessions(
+ struct virtio_crypto_ablkcipher_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ uint32_t alg;
+ int ret;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+ if (keylen > vcrypto->max_cipher_key_len) {
+ pr_err("virtio_crypto: the key is too long\n");
+ goto bad_key;
+ }
+
+ if (virtio_crypto_alg_validate_key(keylen, &alg))
+ goto bad_key;
+
+ /* Create encryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 1);
+ if (ret)
+ return ret;
+ /* Create decryption session */
+ ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+ alg, key, keylen, 0);
+ if (ret) {
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ return ret;
+ }
+ return 0;
+
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/* Note: kernel crypto API realization */
+static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
+
+ if (!ctx->vcrypto) {
+ /* New key */
+ int node = virtio_crypto_get_current_node();
+ struct virtio_crypto *vcrypto =
+ virtcrypto_get_dev_node(node);
+ if (!vcrypto) {
+			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+ return -ENODEV;
+ }
+
+ ctx->vcrypto = vcrypto;
+ } else {
+		/* Rekeying: close the previously created sessions first */
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ }
+
+ ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+ if (ret) {
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+ struct ablkcipher_request *req,
+ struct data_queue *data_vq,
+ __u8 op)
+{
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct virtio_crypto_op_data_req *req_data;
+ int src_nents, dst_nents;
+ int err;
+ unsigned long flags;
+ struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+ int i;
+ u64 dst_len;
+ unsigned int num_out = 0, num_in = 0;
+ int sg_total;
+ uint8_t *iv;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ dst_nents = sg_nents(req->dst);
+
+ pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+ src_nents, dst_nents);
+
+ /* Why 3? outhdr + iv + inhdr */
+ sg_total = src_nents + dst_nents + 3;
+ sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!sgs)
+ return -ENOMEM;
+
+ req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!req_data) {
+ kfree(sgs);
+ return -ENOMEM;
+ }
+
+ vc_req->req_data = req_data;
+ vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ /* Head of operation */
+ if (op) {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+ } else {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+ }
+ req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ cpu_to_le32(req->nbytes);
+
+ dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+ if (unlikely(dst_len > U32_MAX)) {
+ pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+ req->nbytes, dst_len);
+
+ if (unlikely(req->nbytes + dst_len + ivsize +
+ sizeof(vc_req->status) > vcrypto->max_size)) {
+ pr_err("virtio_crypto: The length is too big\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ cpu_to_le32((uint32_t)dst_len);
+
+ /* Outhdr */
+ sg_init_one(&outhdr, req_data, sizeof(*req_data));
+ sgs[num_out++] = &outhdr;
+
+ /* IV */
+
+ /*
+ * Avoid to do DMA from the stack, switch to using
+	 * Avoid doing DMA from the stack; use a dynamically
+	 * allocated buffer for the IV instead.
+ iv = kzalloc_node(ivsize, GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!iv) {
+ err = -ENOMEM;
+ goto free;
+ }
+ memcpy(iv, req->info, ivsize);
+ sg_init_one(&iv_sg, iv, ivsize);
+ sgs[num_out++] = &iv_sg;
+ vc_req->iv = iv;
+
+ /* Source data */
+ for (i = 0; i < src_nents; i++)
+ sgs[num_out++] = &req->src[i];
+
+ /* Destination data */
+ for (i = 0; i < dst_nents; i++)
+ sgs[num_out + num_in++] = &req->dst[i];
+
+ /* Status */
+ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ vc_req->sgs = sgs;
+
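+	/* Queue the request on the data virtqueue and kick the device */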
+ spin_lock_irqsave(&data_vq->lock, flags);
+ err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+ num_in, vc_req, GFP_ATOMIC);
+ virtqueue_kick(data_vq->vq);
+ spin_unlock_irqrestore(&data_vq->lock, flags);
+ if (unlikely(err < 0))
+ goto free_iv;
+
+ return 0;
+
+free_iv:
+ kzfree(iv);
+free:
+ kzfree(req_data);
+ kfree(sgs);
+ return err;
+}
+
+static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Encryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+ struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int ret;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->ablkcipher_ctx = ctx;
+ vc_req->ablkcipher_req = req;
+
+ ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
+ if (ret < 0) {
+ pr_err("virtio_crypto: Decryption failed!\n");
+ return ret;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ ctx->tfm = tfm;
+
+ return 0;
+}
+
+static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!ctx->vcrypto)
+ return;
+
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+ virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+}
+
+static struct crypto_alg virtio_crypto_algs[] = { {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 501,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
+} };
+
+int virtio_crypto_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
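+	/* Only the first active device registers the algorithms */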
+ if (++virtio_crypto_active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+ if (ret)
+ virtio_crypto_active_devs--;
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void virtio_crypto_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
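+	/* Keep the algorithms registered until the last device is gone */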
+ if (--virtio_crypto_active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_algs(virtio_crypto_algs,
+ ARRAY_SIZE(virtio_crypto_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 000000000000..3d6566b02876
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,128 @@
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+	/* Virtqueue associated with this data queue */
+ struct virtqueue *vq;
+
+ /* To protect the vq operations for the dataq */
+ spinlock_t lock;
+
+ /* Name of the tx queue: dataq.$index */
+ char name[32];
+};
+
+struct virtio_crypto {
+ struct virtio_device *vdev;
+ struct virtqueue *ctrl_vq;
+ struct data_queue *data_vq;
+
+ /* To protect the vq operations for the controlq */
+ spinlock_t ctrl_lock;
+
+	/* Maximum number of data queues supported by the device */
+ u32 max_data_queues;
+
+	/* Number of queues currently used by the driver */
+ u32 curr_queue;
+
+ /* Maximum length of cipher key */
+ u32 max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ u32 max_auth_key_len;
+	/* Maximum size of a single request */
+ u64 max_size;
+
+ /* Control VQ buffers: protected by the ctrl_lock */
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+
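+	/* Device state, refcounting and device-manager bookkeeping */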
+ unsigned long status;
+ atomic_t ref_count;
+ struct list_head list;
+ struct module *owner;
+ uint8_t dev_id;
+
+	/* Is the affinity hint set for the virtqueues? */
+ bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+	/* Backend session id, which comes from the host side */
+ __u64 session_id;
+};
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_request {
+ /* Cipher or aead */
+ uint32_t type;
+ uint8_t status;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct virtio_crypto_op_data_req *req_data;
+ struct scatterlist **sgs;
+ uint8_t *iv;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_get_dev_node(int node);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+
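+/* Physical package id of the current CPU, used to pick a nearby device */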
+static inline int virtio_crypto_get_current_node(void)
+{
+ int cpu, node;
+
+ cpu = get_cpu();
+ node = topology_physical_package_id(cpu);
+ put_cpu();
+
+ return node;
+}
+
+int virtio_crypto_algs_register(void);
+void virtio_crypto_algs_unregister(void);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 000000000000..fe70ec823b27
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,476 @@
+ /* Driver for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+static void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+ if (vc_req) {
+ kzfree(vc_req->iv);
+ kzfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_request *vc_req;
+ unsigned long flags;
+ unsigned int len;
+ struct ablkcipher_request *ablk_req;
+ int error;
+ unsigned int qid = vq->index;
+
+ spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_req->ablkcipher_req;
+ virtcrypto_clear_request(vc_req);
+
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ /* Finish the encrypt or decrypt process */
+ ablk_req->base.complete(&ablk_req->base, error);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
+ }
+ }
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+ int ret = -ENOMEM;
+ int i, total_vqs;
+ const char **names;
+
+ /*
+	 * We expect 1 data virtqueue, followed by
+	 * possibly N-1 more data queues used in multiqueue mode,
+	 * followed by the control vq.
+ */
+ total_vqs = vi->max_data_queues + 1;
+
+ /* Allocate space for find_vqs parameters */
+ vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_vq;
+ callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+ if (!callbacks)
+ goto err_callback;
+ names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ goto err_names;
+
+ /* Parameters for control virtqueue */
+ callbacks[total_vqs - 1] = NULL;
+ names[total_vqs - 1] = "controlq";
+
+ /* Allocate/initialize parameters for data virtqueues */
+ for (i = 0; i < vi->max_data_queues; i++) {
+ callbacks[i] = virtcrypto_dataq_callback;
+ snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+ "dataq.%d", i);
+ names[i] = vi->data_vq[i].name;
+ }
+
+ ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
+ names);
+ if (ret)
+ goto err_find;
+
+ vi->ctrl_vq = vqs[total_vqs - 1];
+
+ for (i = 0; i < vi->max_data_queues; i++) {
+ spin_lock_init(&vi->data_vq[i].lock);
+ vi->data_vq[i].vq = vqs[i];
+ }
+
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+
+ return 0;
+
+err_find:
+ kfree(names);
+err_names:
+ kfree(callbacks);
+err_callback:
+ kfree(vqs);
+err_vq:
+ return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+ vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+ GFP_KERNEL);
+ if (!vi->data_vq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+ int i;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_data_queues; i++)
+ virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+
+ vi->affinity_hint_set = false;
+ }
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+ int cpu;
+
+ /*
+ * In single queue mode, we don't set the cpu affinity.
+ */
+ if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+ virtcrypto_clean_affinity(vcrypto, -1);
+ return;
+ }
+
+ /*
+	 * In multiqueue mode, we let each queue be private to one CPU
+	 * by setting the affinity hint to eliminate contention.
+	 *
+	 * TODO: add CPU hotplug support by registering a CPU notifier.
+ *
+ */
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ if (++i >= vcrypto->max_data_queues)
+ break;
+ }
+
+ vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+ kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+ int ret;
+
+	/* Allocate the data queue array */
+ ret = virtcrypto_alloc_queues(vi);
+ if (ret)
+ goto err;
+
+ ret = virtcrypto_find_vqs(vi);
+ if (ret)
+ goto err_free;
+
+ get_online_cpus();
+ virtcrypto_set_affinity(vi);
+ put_online_cpus();
+
+ return 0;
+
+err_free:
+ virtcrypto_free_queues(vi);
+err:
+ return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+ u32 status;
+ int err;
+
+ virtio_cread(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
+
+ /*
+ * Unknown status bits would be a host error and the driver
+ * should consider the device to be broken.
+ */
+ if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+ dev_warn(&vcrypto->vdev->dev,
+ "Unknown status bits: 0x%x\n", status);
+
+ virtio_break_device(vcrypto->vdev);
+ return -EPERM;
+ }
+
+ if (vcrypto->status == status)
+ return 0;
+
+ vcrypto->status = status;
+
+ if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vcrypto->vdev->dev,
+ "Failed to start virtio crypto device.\n");
+
+ return -EPERM;
+ }
+ dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ } else {
+ virtcrypto_dev_stop(vcrypto);
+ dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+ }
+
+ return 0;
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_device *vdev = vcrypto->vdev;
+
+ virtcrypto_clean_affinity(vcrypto, -1);
+
+ vdev->config->del_vqs(vdev);
+
+ virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+ int err = -EFAULT;
+ struct virtio_crypto *vcrypto;
+ u32 max_data_queues = 0, max_cipher_key_len = 0;
+ u32 max_auth_key_len = 0;
+ u64 max_size = 0;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+		 * memory transactions will be very slow.
+ */
+ dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ if (!vcrypto)
+ return -ENOMEM;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_dataqueues, &max_data_queues);
+ if (max_data_queues < 1)
+ max_data_queues = 1;
+
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+
+ /* Add virtio crypto device to global table */
+ err = virtcrypto_devmgr_add_dev(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+ goto free;
+ }
+ vcrypto->owner = THIS_MODULE;
+	vdev->priv = vcrypto;
+ vcrypto->vdev = vdev;
+
+ spin_lock_init(&vcrypto->ctrl_lock);
+
+ /* Use single data queue as default */
+ vcrypto->curr_queue = 1;
+ vcrypto->max_data_queues = max_data_queues;
+ vcrypto->max_cipher_key_len = max_cipher_key_len;
+ vcrypto->max_auth_key_len = max_auth_key_len;
+ vcrypto->max_size = max_size;
+
+ dev_info(&vdev->dev,
+ "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+ vcrypto->max_data_queues,
+ vcrypto->max_cipher_key_len,
+ vcrypto->max_auth_key_len,
+ vcrypto->max_size);
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+ goto free_dev;
+ }
+ virtio_device_ready(vdev);
+
+ err = virtcrypto_update_status(vcrypto);
+ if (err)
+ goto free_vqs;
+
+ return 0;
+
+free_vqs:
+ vcrypto->vdev->config->reset(vdev);
+ virtcrypto_del_vqs(vcrypto);
+free_dev:
+ virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+ kfree(vcrypto);
+ return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_crypto_request *vc_req;
+ int i;
+ struct virtqueue *vq;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++) {
+ vq = vcrypto->data_vq[i].vq;
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+ kfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+ }
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_del_vqs(vcrypto);
+ virtcrypto_devmgr_rm_dev(vcrypto);
+ kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+
+ virtcrypto_del_vqs(vcrypto);
+ return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+ int err;
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtcrypto_probe,
+ .remove = virtcrypto_remove,
+ .config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtcrypto_freeze,
+ .restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 000000000000..a69ff71de2c4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,264 @@
+ /* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+ pr_info("virtio_crypto: only support up to %d devices\n",
+ VIRTIO_CRYPTO_MAX_DEVICES);
+ mutex_unlock(&table_lock);
+ return -EFAULT;
+ }
+
+ list_for_each(itr, &virtio_crypto_table) {
+ struct virtio_crypto *ptr =
+ list_entry(itr, struct virtio_crypto, list);
+
+ if (ptr == vcrypto_dev) {
+ mutex_unlock(&table_lock);
+ return -EEXIST;
+ }
+ }
+ atomic_set(&vcrypto_dev->ref_count, 0);
+ list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+ vcrypto_dev->dev_id = num_devices++;
+ mutex_unlock(&table_lock);
+ return 0;
+}
+
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+ return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+ mutex_lock(&table_lock);
+ list_del(&vcrypto_dev->list);
+ num_devices--;
+ mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first()
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+ struct virtio_crypto *dev = NULL;
+
+ mutex_lock(&table_lock);
+ if (!list_empty(&virtio_crypto_table))
+ dev = list_first_entry(&virtio_crypto_table,
+ struct virtio_crypto,
+ list);
+ mutex_unlock(&table_lock);
+ return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+ return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount and, if this takes the first
+ * reference during this period of use, increment the module refcount
+ * too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when the module refcount cannot be taken
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+ if (!try_module_get(vcrypto_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount and, if this drops the last
+ * reference taken during this period of use, decrement the module
+ * refcount too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+ module_put(vcrypto_dev->owner);
+}
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+ return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node: Node id the driver works.
+ *
+ * Function returns the least-used virtio crypto device on the given node.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node)
+{
+ struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+ unsigned long best = ~0;
+ unsigned long ctr;
+
+ mutex_lock(&table_lock);
+ list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+
+ if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+ dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+ virtcrypto_dev_started(tmp_dev)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ vcrypto_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!vcrypto_dev) {
+ pr_info("virtio_crypto: Could not find a device on node %d\n",
+ node);
+ /* Get any started device */
+ list_for_each_entry(tmp_dev,
+ virtcrypto_devmgr_get_head(), list) {
+ if (virtcrypto_dev_started(tmp_dev)) {
+ vcrypto_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&table_lock);
+ if (!vcrypto_dev)
+ return NULL;
+
+ virtcrypto_dev_get(vcrypto_dev);
+ return vcrypto_dev;
+}
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when algorithm registration fails
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+ if (virtio_crypto_algs_register()) {
+ pr_err("virtio_crypto: Failed to register crypto algs\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is about to be stopped and must no longer be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+ virtio_crypto_algs_unregister();
+}
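A minimal usage sketch of the device-manager API added above (editorial illustration, not part of this patch; the caller and its error handling are hypothetical, only the virtcrypto_* helpers come from this file):

    /* Hypothetical caller: pick the least-used started device on a node. */
    static int example_submit_on_node(int node)
    {
    	struct virtio_crypto *vcrypto;

    	vcrypto = virtcrypto_get_dev_node(node);	/* takes a reference */
    	if (!vcrypto)
    		return -ENODEV;

    	/* ... build and queue a request on vcrypto's data queues ... */

    	virtcrypto_dev_put(vcrypto);			/* drop the reference */
    	return 0;
    }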
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..55f7c392582f 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
- $(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+ $(call if_changed,perl)
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
- $(call cmd,perl)
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+ $(call if_changed,perl)
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 286447a83dab..ed758b74ddf0 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -75,6 +75,73 @@ struct dax_dev {
struct resource res[0];
};
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%d\n", dax_region->id);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&dax_region->res));
+ device_unlock(dev);
+
+ return rc;
+}
+static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%u\n", dax_region->align);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(align);
+
+static struct attribute *dax_region_attributes[] = {
+ &dev_attr_region_size.attr,
+ &dev_attr_align.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_region_attribute_group = {
+ .name = "dax_region",
+ .attrs = dax_region_attributes,
+};
+
+static const struct attribute_group *dax_region_attribute_groups[] = {
+ &dax_region_attribute_group,
+ NULL,
+};
+
static struct inode *dax_alloc_inode(struct super_block *sb)
{
return kmem_cache_alloc(dax_cache, GFP_KERNEL);
@@ -200,12 +267,31 @@ void dax_region_put(struct dax_region *dax_region)
}
EXPORT_SYMBOL_GPL(dax_region_put);
+static void dax_region_unregister(void *region)
+{
+ struct dax_region *dax_region = region;
+
+ sysfs_remove_groups(&dax_region->dev->kobj,
+ dax_region_attribute_groups);
+ dax_region_put(dax_region);
+}
+
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, unsigned int align, void *addr,
unsigned long pfn_flags)
{
struct dax_region *dax_region;
+ /*
+ * The DAX core assumes that it can store its private data in
+ * parent->driver_data. This WARN is a reminder / safeguard for
+ * developers of device-dax drivers.
+ */
+ if (dev_get_drvdata(parent)) {
+ dev_WARN(parent, "dax core failed to setup private data\n");
+ return NULL;
+ }
+
if (!IS_ALIGNED(res->start, align)
|| !IS_ALIGNED(resource_size(res), align))
return NULL;
@@ -214,6 +300,7 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
if (!dax_region)
return NULL;
+ dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
@@ -222,7 +309,14 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
dax_region->align = align;
dax_region->dev = parent;
dax_region->base = addr;
+ if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
+ kfree(dax_region);
+ return NULL;
+ }
+ kref_get(&dax_region->kref);
+ if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
+ return NULL;
return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
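A hedged sketch of a device-dax host driver calling alloc_dax_region() after this change (hypothetical caller; only alloc_dax_region() and its drvdata rule come from the patch, the surrounding names and flags are illustrative). The point shown is that parent->driver_data now belongs to the DAX core, so the caller must leave it untouched:

    /* Hypothetical host-driver probe. */
    static int example_dax_host_probe(struct device *dev, struct resource *res)
    {
    	struct dax_region *region;

    	/*
    	 * Do NOT call dev_set_drvdata(dev, ...) before this: the core
    	 * stores its own pointer there and dev_WARN()s if the slot is
    	 * already occupied.
    	 */
    	region = alloc_dax_region(dev, 0, res, PAGE_SIZE, NULL, PFN_DEV);
    	if (!region)
    		return -ENOMEM;

    	/* ... create dax device instances against 'region' ... */
    	return 0;
    }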
@@ -328,7 +422,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- unsigned long vaddr = (unsigned long) vmf->virtual_address;
struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
int rc = VM_FAULT_SIGBUS;
@@ -353,7 +446,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- rc = vm_insert_mixed(vma, vaddr, pfn);
+ rc = vm_insert_mixed(vma, vmf->address, pfn);
if (rc == -ENOMEM)
return VM_FAULT_OOM;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 73c6ce93a0d9..033f49b31fdc 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -89,7 +89,8 @@ static int dax_pmem_probe(struct device *dev)
pfn_sb = nd_pfn->pfn_sb;
if (!devm_request_mem_region(dev, nsio->res.start,
- resource_size(&nsio->res), dev_name(dev))) {
+ resource_size(&nsio->res),
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
return -EBUSY;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a324801d6a66..47206a21bb90 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
list_add(&devfreq->node, &devfreq_list);
governor = find_devfreq_governor(devfreq->governor_name);
- if (!IS_ERR(governor))
- devfreq->governor = governor;
- if (devfreq->governor)
- err = devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_START, NULL);
+ if (IS_ERR(governor)) {
+ dev_err(dev, "%s: Unable to find governor for the device\n",
+ __func__);
+ err = PTR_ERR(governor);
+ goto err_init;
+ }
+
+ devfreq->governor = governor;
+ err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+ NULL);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index a8ed7792ece2..9af86f46fbec 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -497,7 +497,7 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(bus->devfreq);
goto err;
}
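Both hunks converge on the same error-propagation pattern: devfreq_add_device() returns an ERR_PTR, so callers should forward PTR_ERR() instead of substituting a fixed code, which keeps -EPROBE_DEFER working when the governor is not yet registered. A minimal caller-side sketch (illustrative, not from the patch):

    /* Hypothetical devfreq user in a driver probe path. */
    devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand", NULL);
    if (IS_ERR(devfreq)) {
    	dev_err(dev, "failed to add devfreq device\n");
    	return PTR_ERR(devfreq);	/* may be -EPROBE_DEFER, -ENOMEM, ... */
    }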
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 55dd88d82d6d..830184529109 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/dio.h>
#include <linux/slab.h> /* kmalloc() */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h> /* readb() */
struct dio_bus dio_bus = {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2154ea3c5d1c..263495d0adbd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -494,7 +494,7 @@ config TEGRA20_APB_DMA
or vice versa. It does not support memory to memory data transfer.
config TEGRA210_ADMA
- bool "NVIDIA Tegra210 ADMA support"
+ tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c31f760..0b7c6ce629a6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ return plchan->cd == chan_id;
+}
+
/*
* Just check that the device is there and active
* TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto out_no_platdata;
}
+ } else {
+ pl08x->slave.filter.map = pl08x->pd->slave_map;
+ pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+ pl08x->slave.filter.fn = pl08x_filter_fn;
}
/* By default, AHB1 only. If dualmaster, from platform */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80db29d..1baf3404a365 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
struct at_dma *atdma = to_at_dma(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(struct at_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..7d4e0bcda9af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
- u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
dma_addr_t phys;
- desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
if (desc) {
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->descs_list);
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
}
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6149e5..c9297605058c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread {
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
+ u8 **usrcs;
u8 **dsts;
+ u8 **udsts;
enum dma_transaction_type type;
bool done;
};
@@ -427,10 +429,11 @@ static int dmatest_func(void *data)
int dst_cnt;
int i;
ktime_t ktime, start, diff;
- ktime_t filltime = ktime_set(0, 0);
- ktime_t comparetime = ktime_set(0, 0);
+ ktime_t filltime = 0;
+ ktime_t comparetime = 0;
s64 runtime = 0;
unsigned long long total_len = 0;
+ u8 align = 0;
set_freezable();
@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY) {
+ align = dev->copy_align;
src_cnt = dst_cnt = 1;
- else if (thread->type == DMA_SG)
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
src_cnt = dst_cnt = sg_buffers;
- else if (thread->type == DMA_XOR) {
+ } else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
+ align = dev->xor_align;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+ align = dev->pq_align;
- pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
if (!pq_coefs)
goto err_thread_type;
@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
} else
goto err_thread_type;
- thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
+
+ thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->usrcs)
+ goto err_usrcs;
+
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->srcs[i])
+ thread->usrcs[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->usrcs[i])
goto err_srcbuf;
+
+ /* align srcs to alignment restriction */
+ if (align)
+ thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+ else
+ thread->srcs[i] = thread->usrcs[i];
}
thread->srcs[i] = NULL;
- thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
if (!thread->dsts)
goto err_dsts;
+
+ thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->udsts)
+ goto err_udsts;
+
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
- if (!thread->dsts[i])
+ thread->udsts[i] = kmalloc(params->buf_size + align,
+ GFP_KERNEL);
+ if (!thread->udsts[i])
goto err_dstbuf;
+
+ /* align dsts to alignment restriction */
+ if (align)
+ thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+ else
+ thread->dsts[i] = thread->udsts[i];
}
thread->dsts[i] = NULL;
@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
- u8 align = 0;
struct scatterlist tx_sg[src_cnt];
struct scatterlist rx_sg[src_cnt];
total_tests++;
- /* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
- align = dev->copy_align;
- else if (thread->type == DMA_XOR)
- align = dev->xor_align;
- else if (thread->type == DMA_PQ)
- align = dev->pq_align;
-
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+ um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)
ret = 0;
err_dstbuf:
- for (i = 0; thread->dsts[i]; i++)
- kfree(thread->dsts[i]);
+ for (i = 0; thread->udsts[i]; i++)
+ kfree(thread->udsts[i]);
+ kfree(thread->udsts);
+err_udsts:
kfree(thread->dsts);
err_dsts:
err_srcbuf:
- for (i = 0; thread->srcs[i]; i++)
- kfree(thread->srcs[i]);
+ for (i = 0; thread->usrcs[i]; i++)
+ kfree(thread->usrcs[i]);
+ kfree(thread->usrcs);
+err_usrcs:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
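The dmatest change keeps two pointer arrays per direction: the raw kmalloc() pointers (usrcs/udsts), which are what get kfree()d, and the PTR_ALIGN()ed pointers (srcs/dsts), which are what the DMA engine sees. A stripped-down sketch of that over-allocate-and-align pattern (illustrative only; 'align' is treated here as a byte alignment in the PTR_ALIGN() sense):

    u8 *raw, *buf;

    raw = kmalloc(buf_size + align, GFP_KERNEL);	/* keep for kfree() */
    if (!raw)
    	return -ENOMEM;
    buf = align ? PTR_ALIGN(raw, align) : raw;	/* hand this to the engine */
    /* ... use up to buf_size bytes starting at buf ... */
    kfree(raw);					/* never kfree(buf) */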
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e00c9b022964..5a37b9fcf40d 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
- platfroms that enumerate it as a PCI device. For example,
+ platforms that enumerate it as a PCI device. For example,
Intel Medfield has integrated this GPDMA controller.
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a613cb7a..e5adf5d1c34f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
- dwc->nollp = pdata->is_nollp;
+ dwc->nollp = !pdata->multi_block[i];
}
}
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..b1655e40cfa2 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
+ /*
+ * All known devices, which use DT for configuration, support
+ * memory-to-memory transfers. So enable it by default.
+ */
+ pdata->is_memcpy = true;
+
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
return pdata;
}
#else
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd104479f..4e0128c62704 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#define DW_DMA_MAX_NR_CHANNELS 8
+#include "internal.h"
+
#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b37ef87..3879f80a4815 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
int i;
s8 (*queue_priority_mapping)[2];
+ /* re initialize dummy slot to dummy param set */
+ edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
queue_priority_mapping = ecc->info->queue_priority_mapping;
/* Event queue priority mapping */
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1653a2..90d29f90acfb 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
+MODULE_DEVICE_TABLE(of, fsl_re_ids);
static struct platform_driver fsl_re_driver = {
.driver = {
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f045ed..4875fa428e81 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!chip)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
chip->dev = &pdev->dev;
chip->regs = pcim_iomap_table(pdev)[0];
chip->length = pci_resource_len(pdev, 0);
chip->offset = HSU_PCI_CHAN_OFFSET;
- chip->irq = pdev->irq;
-
- pci_enable_msi(pdev);
+ chip->irq = pci_irq_vector(pdev, 0);
ret = hsu_dma_probe(chip);
if (ret)
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1e9c55..54db1411ce73 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!len)
return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
xfer_size);
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
struct mdc_dma *mdma = mchan->mdma;
struct mdc_tx_desc *mdesc;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
if (!buf_len && !period_len)
return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
struct mdc_tx_desc *mdesc;
struct scatterlist *sg;
struct mdc_hw_list_desc *curr, *prev = NULL;
- dma_addr_t curr_phys, prev_phys;
+ dma_addr_t curr_phys;
unsigned int i;
if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
}
prev = curr;
- prev_phys = curr_phys;
mdesc->list_len++;
mdesc->list_xfer_size += xfer_size;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2bfc05..d1651a50c349 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
+ * @buf_ptail ID of the previous buffer that was processed
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
+ unsigned int buf_ptail;
unsigned int num_bd;
unsigned int period_len;
struct sdma_buffer_descriptor *bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
sdmac->chn_real_count = bd->mode.count;
bd->mode.status |= BD_DONE;
bd->mode.count = sdmac->period_len;
+ sdmac->buf_ptail = sdmac->buf_tail;
+ sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
/*
* The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
- sdmac->buf_tail++;
- sdmac->buf_tail %= sdmac->num_bd;
-
if (error)
sdmac->status = old_status;
}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
sdmac->buf_tail = 0;
+ sdmac->buf_ptail = 0;
+ sdmac->chn_real_count = 0;
sdmac->period_len = period_len;
sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
u32 residue;
if (sdmac->flags & IMX_DMA_SG_LOOP)
- residue = (sdmac->num_bd - sdmac->buf_tail) *
+ residue = (sdmac->num_bd - sdmac->buf_ptail) *
sdmac->period_len - sdmac->chn_real_count;
else
residue = sdmac->chn_count - sdmac->chn_real_count;
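A brief worked example of the new cyclic residue formula (editorial, made-up numbers): with num_bd = 4 descriptors of period_len = 1024 bytes and chn_real_count = 1024 for the buffer that just completed, buf_ptail is still 0 when the status is read, so

    residue = (num_bd - buf_ptail) * period_len - chn_real_count
            = (4 - 0) * 1024 - 1024 = 3072 bytes;

using buf_tail instead (already advanced to 1 before the callback runs) would have given 2048, one full period less, which is the off-by-one-period this hunk corrects.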
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
#include "../dmaengine.h"
static char *chanerr_str[] = {
+ "DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
"Next Descriptor Address Error",
"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
"Result Guard Tag verification Error",
"Result Application Tag verification Error",
"Result Reference Tag verification Error",
- NULL
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
int i;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
if ((chanerr >> i) & 1) {
- if (chanerr_str[i]) {
- dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
- i, chanerr_str[i]);
- } else
- break;
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
}
}
}
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
- struct ioatdma_device *ioat_dma;
struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
int chunk;
dma_addr_t phys;
u8 *pos;
off_t offs;
- ioat_dma = to_ioatdma_device(chan->device);
-
chunk = idx / IOAT_DESCS_PER_2M;
idx &= (IOAT_DESCS_PER_2M - 1);
offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
- struct dmaengine_result res;
-
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- res.result = DMA_TRANS_NOERROR;
dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback = NULL;
tx->callback_result = NULL;
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 8e67895bcca3..abcc51b343ce 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..cc5259b881d4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
/* I/OAT v3.3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
}
}
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
@@ -340,11 +347,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "mapping src buffer failed\n");
+ err = -ENOMEM;
goto free_resources;
}
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_dest)) {
dev_err(dev, "mapping dest buffer failed\n");
+ err = -ENOMEM;
goto unmap_src;
}
flags = DMA_PREP_INTERRUPT;
@@ -691,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
- GFP_KERNEL, &ioat_chan->completion_dma);
+ GFP_NOWAIT, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -701,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
order = IOAT_MAX_ORDER;
- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
if (!ring)
return -ENOMEM;
@@ -827,16 +836,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dest_dma))
+ if (dma_mapping_error(dev, dest_dma)) {
+ err = -ENOMEM;
goto free_resources;
+ }
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +917,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +972,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_srcs[i]))
+ if (dma_mapping_error(dev, dma_srcs[i])) {
+ err = -ENOMEM;
goto dma_unmap;
+ }
}
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1088,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
struct dma_device *dma;
struct dma_chan *c;
struct ioatdma_chan *ioat_chan;
- bool is_raid_device = false;
int err;
u16 val16;
@@ -1095,7 +1111,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
if (ioat_dma->cap & IOAT_CAP_XOR) {
- is_raid_device = true;
dma->max_xor = 8;
dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1121,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
}
if (ioat_dma->cap & IOAT_CAP_PQ) {
- is_raid_device = true;
dma->device_prep_dma_pq = ioat_prep_pq;
dma->device_prep_dma_pq_val = ioat_prep_pq_val;
@@ -1350,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
if (device->version >= IOAT_VER_3_0) {
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
if (device->version >= IOAT_VER_3_3)
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb7934b05..01e25c68dd5a 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 818255844a3c..5ba5714d0b7c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
int ret;
for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- unsigned long data;
ch = &mic_dma_dev->mic_ch[i];
- data = (unsigned long)ch;
ch->ch_num = i;
ch->owner = owner;
spin_lock_init(&ch->cleanup_lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..0cb951b743a6 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+ dma_addr_t dma_src, dma_addr_t dma_dst,
+ u32 len, struct mv_xor_desc_slot *prev)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ hw_desc->status = XOR_DESC_DMA_OWNED;
+ hw_desc->phy_next_desc = 0;
+ /* Configure for XOR with only one src address -> MEMCPY */
+ hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+ hw_desc->phy_dest_addr = dma_dst;
+ hw_desc->phy_src_addr[0] = dma_src;
+ hw_desc->byte_count = len;
+
+ if (prev) {
+ struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+ hw_prev->phy_next_desc = desc->async_tx.phys;
+ }
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ /* Enable end-of-descriptor interrupt */
+ hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
node) {
- if (async_tx_test_ack(&iter->async_tx))
+ if (async_tx_test_ack(&iter->async_tx)) {
list_move_tail(&iter->node, &mv_chan->free_slots);
+ if (!list_empty(&iter->sg_tx_list)) {
+ list_splice_tail_init(&iter->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
}
return 0;
}
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
/* the client is allowed to attach dependent operations
* until 'ack' is set
*/
- if (!async_tx_test_ack(&desc->async_tx))
+ if (!async_tx_test_ack(&desc->async_tx)) {
/* move this slot to the completed_slots */
list_move_tail(&desc->node, &mv_chan->completed_slots);
- else
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->completed_slots);
+ }
+ } else {
list_move_tail(&desc->node, &mv_chan->free_slots);
+ if (!list_empty(&desc->sg_tx_list)) {
+ list_splice_tail_init(&desc->sg_tx_list,
+ &mv_chan->free_slots);
+ }
+ }
return 0;
}
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->node);
+ INIT_LIST_HEAD(&slot->sg_tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *new;
+ struct mv_xor_desc_slot *first = NULL;
+ struct mv_xor_desc_slot *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ int desc_cnt = 0;
+ int ret;
+
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+ __func__, dst_sg_len, src_sg_len, flags);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ desc_cnt++;
+ new = mv_chan_alloc_slot(mv_chan);
+ if (!new) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Out of descriptors (desc_cnt=%d)!\n",
+ desc_cnt);
+ goto err;
+ }
+
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+ if (len == 0)
+ goto fetch;
+
+ if (len < MV_XOR_MIN_BYTE_COUNT) {
+ dev_err(mv_chan_to_devp(mv_chan),
+ "Transfer size of %zu too small!\n", len);
+ goto err;
+ }
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ /* Check if a new window needs to get added for 'dst' */
+ ret = mv_xor_add_io_win(mv_chan, dma_dst);
+ if (ret)
+ goto err;
+
+ /* Check if a new window needs to get added for 'src' */
+ ret = mv_xor_add_io_win(mv_chan, dma_src);
+ if (ret)
+ goto err;
+
+ /* Populate the descriptor */
+ mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+ prev = new;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+
+ /* Fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Set the EOD flag in the last descriptor */
+ mv_xor_desc_config_eod(new);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+
+err:
+ /* Cleanup: Move all descriptors back into the free list */
+ spin_lock_bh(&mv_chan->lock);
+ mv_desc_clean_slot(first, mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
+
+ return NULL;
+}
+
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
+ dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
},
};
-
-static int __init mv_xor_init(void)
-{
- return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab222a23..cf921dd6af73 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
*/
struct mv_xor_desc_slot {
struct list_head node;
+ struct list_head sg_tx_list;
enum dma_transaction_type type;
void *hw_desc;
u16 idx;
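A hedged sketch of how a dmaengine client could exercise the new DMA_SG capability (hypothetical caller; it assumes the dmaengine_prep_dma_sg() wrapper of this kernel generation and scatterlists that have already been DMA-mapped with dma_map_sg()):

    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
    			       src_sg, src_nents, DMA_PREP_INTERRUPT);
    if (!tx)
    	return -ENOMEM;

    tx->callback = example_done;	/* hypothetical completion handler */
    cookie = dmaengine_submit(tx);
    dma_async_issue_pending(chan);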
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 09de71519d37..3f45b9bdf201 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -225,6 +225,8 @@ struct nbpf_channel {
struct nbpf_device {
struct dma_device dma_dev;
void __iomem *base;
+ u32 max_burst_mem_read;
+ u32 max_burst_mem_write;
struct clk *clk;
const struct nbpf_config *config;
unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+ enum dma_transfer_direction direction)
{
+ int max_burst = nbpf->config->buffer_size * 8;
+
+ if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+ switch (direction) {
+ case DMA_MEM_TO_MEM:
+ max_burst = min_not_zero(nbpf->max_burst_mem_read,
+ nbpf->max_burst_mem_write);
+ break;
+ case DMA_MEM_TO_DEV:
+ if (nbpf->max_burst_mem_read)
+ max_burst = nbpf->max_burst_mem_read;
+ break;
+ case DMA_DEV_TO_MEM:
+ if (nbpf->max_burst_mem_write)
+ max_burst = nbpf->max_burst_mem_write;
+ break;
+ case DMA_DEV_TO_DEV:
+ default:
+ break;
+ }
+ }
+
/* Maximum supported bursts depend on the buffer size */
- return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+ return min_t(int, __ffs(size), ilog2(max_burst));
}
static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
size = burst;
}
- return nbpf_xfer_ds(nbpf, size);
+ return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}
/*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
* transfers we enable the SBE bit and terminate the transfer in our
* .device_pause handler.
*/
- mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
switch (direction) {
case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
if (IS_ERR(nbpf->clk))
return PTR_ERR(nbpf->clk);
+ of_property_read_u32(np, "max-burst-mem-read",
+ &nbpf->max_burst_mem_read);
+ of_property_read_u32(np, "max-burst-mem-write",
+ &nbpf->max_burst_mem_write);
+
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
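A short worked example of the burst selection above (editorial, illustrative numbers): nbpf_xfer_ds() now returns min(__ffs(size), ilog2(max_burst)), with max_burst clamped per direction by the new max-burst-mem-read/-write properties.

    /*
     * size = 4096               -> __ffs(4096) = 12
     * buffer_size * 8 = 128     -> ilog2(128)  = 7   (old implicit limit)
     * max-burst-mem-read = 32   -> ilog2(32)   = 5
     *
     * DMA_MEM_TO_DEV therefore yields min(12, 5) = 5, i.e. bursts of 2^5,
     * whereas without the DT cap it would have been min(12, 7) = 7.
     */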
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4b1c54..daf479cce691 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -166,6 +166,9 @@ enum {
CSDP_DST_BURST_16 = 1 << 14,
CSDP_DST_BURST_32 = 2 << 14,
CSDP_DST_BURST_64 = 3 << 14,
+ CSDP_WRITE_NON_POSTED = 0 << 16,
+ CSDP_WRITE_POSTED = 1 << 16,
+ CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
CICR_TOUT_IE = BIT(0), /* OMAP1 only */
CICR_DROP_IE = BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
c->running = true;
}
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+ int i;
+ u32 val;
+
+ /* Wait for sDMA FIFO to drain */
+ for (i = 0; ; i++) {
+ val = omap_dma_chan_read(c, CCR);
+ if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+ break;
+
+ if (i > 100)
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+ dev_err(c->vc.chan.device->dev,
+ "DMA drain did not complete on lch %d\n",
+ c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
val = omap_dma_chan_read(c, CCR);
if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
uint32_t sysconfig;
- unsigned i;
sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
- /* Wait for sDMA FIFO to drain */
- for (i = 0; ; i++) {
- val = omap_dma_chan_read(c, CCR);
- if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
- break;
-
- if (i > 100)
- break;
-
- udelay(5);
- }
-
- if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
- dev_err(c->vc.chan.device->dev,
- "DMA drain did not complete on lch %d\n",
- c->dma_ch);
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
} else {
+ if (!(val & CCR_ENABLE))
+ return -EINVAL;
+
val &= ~CCR_ENABLE;
omap_dma_chan_write(c, CCR, val);
+
+ if (!(c->ccr & CCR_BUFFERING_DISABLE))
+ omap_dma_drain_chan(c);
}
mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
-
c->running = false;
+ return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
} else {
txstate->residue = 0;
}
+ if (ret == DMA_IN_PROGRESS && c->paused)
+ ret = DMA_PAUSED;
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
unsigned i, es, en, frame_bytes;
bool ll_failed = false;
u32 burst;
+ u32 port_window, port_window_bytes;
if (dir == DMA_DEV_TO_MEM) {
dev_addr = c->cfg.src_addr;
dev_width = c->cfg.src_addr_width;
burst = c->cfg.src_maxburst;
+ port_window = c->cfg.src_port_window_size;
} else if (dir == DMA_MEM_TO_DEV) {
dev_addr = c->cfg.dst_addr;
dev_width = c->cfg.dst_addr_width;
burst = c->cfg.dst_maxburst;
+ port_window = c->cfg.dst_port_window_size;
} else {
dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
return NULL;
}
+ /* When the port_window is used, one frame must cover the window */
+ if (port_window) {
+ burst = port_window;
+ port_window_bytes = port_window * es_bytes[es];
+ }
+
/* Now allocate and setup the descriptor. */
d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
if (!d)
@@ -905,11 +933,46 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr = c->ccr | CCR_SYNC_FRAME;
if (dir == DMA_DEV_TO_MEM) {
- d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+
+ if (port_window_bytes >= 64)
+ d->csdp |= CSDP_SRC_BURST_64;
+ else if (port_window_bytes >= 32)
+ d->csdp |= CSDP_SRC_BURST_32;
+ else if (port_window_bytes >= 16)
+ d->csdp |= CSDP_SRC_BURST_16;
+
+ } else {
+ d->ccr |= CCR_SRC_AMODE_CONSTANT;
+ }
} else {
- d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ if (port_window) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ d->fi = -(port_window_bytes - 1);
+
+ if (port_window_bytes >= 64)
+ d->csdp |= CSDP_DST_BURST_64;
+ else if (port_window_bytes >= 32)
+ d->csdp |= CSDP_DST_BURST_32;
+ else if (port_window_bytes >= 16)
+ d->csdp |= CSDP_DST_BURST_16;
+ } else {
+ d->ccr |= CCR_DST_AMODE_CONSTANT;
+ }
}
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
@@ -927,6 +990,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_TRIGGER_SRC;
d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+ if (port_window)
+ d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
}
if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1018,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
+ if (port_window && dir == DMA_DEV_TO_MEM) {
+ osg->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ osg->fi = -(port_window_bytes - 1);
+ }
if (d->using_ll) {
osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1323,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
omap_dma_stop(c);
}
- if (c->cyclic) {
- c->cyclic = false;
- c->paused = false;
- }
+ c->cyclic = false;
+ c->paused = false;
vchan_get_all_descriptors(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1343,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool can_pause = false;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
+
+ if (!c->desc)
+ goto out;
- if (!c->paused) {
- omap_dma_stop(c);
- c->paused = true;
+ if (c->cyclic)
+ can_pause = true;
+
+ /*
+ * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+ * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+ * "When a channel is disabled during a transfer, the channel undergoes
+ * an abort, unless it is hardware-source-synchronized …".
+ * A source-synchronised channel is one where the fetching of data is
+ * under control of the device. In other words, a device-to-memory
+ * transfer. So, a destination-synchronised channel (which would be a
+ * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+ * bit is cleared.
+ * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+ * aborts immediately after completion of current read/write
+ * transactions and then the FIFO is cleaned up." The term "cleaned up"
+ * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+ * are both clear _before_ disabling the channel, otherwise data loss
+ * will occur.
+ * The problem is that if the channel is active, then device activity
+ * can result in DMA activity starting between reading those as both
+ * clear and the write to DMA_CCR to clear the enable bit hitting the
+ * hardware. If the DMA hardware can't drain the data in its FIFO to the
+ * destination, then data loss "might" occur (say if we write to a UART
+ * and the UART is not accepting any further data).
+ */
+ else if (c->desc->dir == DMA_DEV_TO_MEM)
+ can_pause = true;
+
+ if (can_pause && !c->paused) {
+ ret = omap_dma_stop(c);
+ if (!ret)
+ c->paused = true;
}
+out:
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+ unsigned long flags;
+ int ret = -EINVAL;
- /* Pause/Resume only allowed with cyclic mode */
- if (!c->cyclic)
- return -EINVAL;
+ spin_lock_irqsave(&od->irq_lock, flags);
- if (c->paused) {
+ if (c->paused && c->desc) {
mb();
/* Restore channel link register */
@@ -1298,9 +1410,11 @@ static int omap_dma_resume(struct dma_chan *chan)
omap_dma_start(c, c->desc);
c->paused = false;
+ ret = 0;
}
+ spin_unlock_irqrestore(&od->irq_lock, flags);
- return 0;
+ return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
@@ -1339,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
struct omap_dmadev *od;
struct resource *res;
int rc, i, irq;
+ u32 lch_count;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -1381,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- if (!pdev->dev.of_node) {
- od->dma_requests = od->plat->dma_attr->lch_count;
- if (unlikely(!od->dma_requests))
- od->dma_requests = OMAP_SDMA_REQUESTS;
- } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
- &od->dma_requests)) {
+ /* Number of DMA requests */
+ od->dma_requests = OMAP_SDMA_REQUESTS;
+ if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+ "dma-requests",
+ &od->dma_requests)) {
dev_info(&pdev->dev,
"Missing dma-requests property, using %u.\n",
OMAP_SDMA_REQUESTS);
- od->dma_requests = OMAP_SDMA_REQUESTS;
}
- od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
- sizeof(*od->lch_map), GFP_KERNEL);
+ /* Number of available logical channels */
+ if (!pdev->dev.of_node) {
+ lch_count = od->plat->dma_attr->lch_count;
+ if (unlikely(!lch_count))
+ lch_count = OMAP_SDMA_CHANNELS;
+ } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+ &lch_count)) {
+ dev_info(&pdev->dev,
+ "Missing dma-channels property, using %u.\n",
+ OMAP_SDMA_CHANNELS);
+ lch_count = OMAP_SDMA_CHANNELS;
+ }
+
+ od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+ GFP_KERNEL);
if (!od->lch_map)
return -ENOMEM;
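A brief illustration of the port_window indexing above (editorial, hypothetical numbers): with a 16-element window of 4-byte elements, port_window_bytes = 64, so the code programs an element index of 1 and a frame index of -(64 - 1) = -63. Per the comment in the hunk, that negative frame index makes the sDMA step back to the start of the 64-byte window once a frame has been transferred, so every frame re-covers the same device window while the memory side keeps post-incrementing.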
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index df95727dc2fb..f9028e9d0dfc 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct pch_dma_desc *desc = to_pd_desc(txd);
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
- dma_cookie_t cookie;
spin_lock(&pd_chan->lock);
- cookie = dma_cookie_assign(txd);
if (list_empty(&pd_chan->active_list)) {
list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_alloc(pd->pool, flags, &addr);
+ desc = pci_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
- memset(desc, 0, sizeof(struct pch_dma_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = pd_tx_submit;
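
The pch_dma hunk above replaces a pci_pool_alloc()/memset() pair with
pci_pool_zalloc(), which hands back already-zeroed memory. A hedged sketch of
the same idiom using the generic DMA-pool API (helper name is illustrative):

	#include <linux/gfp.h>
	#include <linux/dmapool.h>

	/* Allocate one zeroed descriptor from a DMA pool in a single call. */
	static void *alloc_zeroed_desc(struct dma_pool *pool, dma_addr_t *phys)
	{
		return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
	}
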
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 030fe05ed43b..740bbb942594 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
/* for cyclic capability */
bool cyclic;
+
+ /* for runtime pm tracking */
+ bool active;
};
struct pl330_dmac {
@@ -570,7 +573,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((__le16 *)&buf[1]) = cpu_to_le16(val);
+ buf[1] = val;
+ buf[2] = val >> 8;
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -724,7 +728,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((__le32 *)&buf[2]) = cpu_to_le32(val);
+ buf[2] = val;
+ buf[3] = val >> 8;
+ buf[4] = val >> 16;
+ buf[5] = val >> 24;
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +906,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAGO;
buf[0] |= (ns << 1);
-
buf[1] = chan & 0x7;
-
- *((__le32 *)&buf[2]) = cpu_to_le32(addr);
+ buf[2] = addr;
+ buf[3] = addr >> 8;
+ buf[4] = addr >> 16;
+ buf[5] = addr >> 24;
return SZ_DMAGO;
}
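
The _emit_ADDH/_emit_MOV/_emit_GO hunks above replace potentially unaligned
__le16/__le32 stores with byte-wise writes into the microcode buffer. A hedged
sketch of the underlying pattern (the helper name is illustrative, not from the
driver):

	#include <linux/types.h>

	/* Store a 32-bit value into an instruction buffer one byte at a time,
	 * least-significant byte first, regardless of buffer alignment or
	 * host endianness. */
	static void emit_le32(u8 buf[], u32 val)
	{
		buf[0] = val;
		buf[1] = val >> 8;
		buf[2] = val >> 16;
		buf[3] = val >> 24;
	}
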
@@ -1883,11 +1891,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
static int pl330_add(struct pl330_dmac *pl330)
{
- void __iomem *regs;
int i, ret;
- regs = pl330->base;
-
/* Check if we can handle this DMAC */
if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2031,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
_stop(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = true;
+ pch->active = false;
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
@@ -2050,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
if (power_down) {
+ pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
@@ -2164,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
+ bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
@@ -2174,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ power_down = pch->active;
+ pch->active = false;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2191,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
+ if (power_down)
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0;
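
The pch->active flag introduced above exists to balance runtime-PM references:
pl330_issue_pending() takes a reference when a channel goes busy, and a
terminate issued while the channel is still active must drop that same
reference. A minimal sketch of the balancing step, assuming an 'active' flag
like the one added here (names illustrative):

	#include <linux/types.h>
	#include <linux/pm_runtime.h>

	/* Drop the extra runtime-PM reference taken at issue_pending time,
	 * but only if the channel was still marked active when terminated. */
	static void channel_pm_put_if_active(struct device *dev, bool *active)
	{
		if (*active) {
			*active = false;
			pm_runtime_put_autosuspend(dev);
		}
	}
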
@@ -2263,6 +2275,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
}
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+ if (!val)
+ return 0;
+
return val - addr;
}
@@ -2350,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
* updated on work_list emptiness status.
*/
WARN_ON(list_empty(&pch->submitted_list));
+ pch->active = true;
pm_runtime_get_sync(pch->dmac->ddma.dev);
}
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 3f56f9ca4482..b53fb618bbf6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
for (i = 0; i < pdev->nr_chans; i++) {
if (prio != (i & 0xf) >> 2)
continue;
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
phy = &pdev->phys[i];
if (!phy->vchan) {
phy->vchan = pchan;
found = phy;
- if (i < 32)
- legacy_unavailable |= BIT(i);
goto out_unlock;
}
}
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
unsigned long flags;
u32 reg;
- int i;
dev_dbg(&chan->vc.chan.dev->device,
"%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
}
spin_lock_irqsave(&pdev->phy_lock, flags);
- for (i = 0; i < 32; i++)
- if (chan->phy == &pdev->phys[i])
- legacy_unavailable &= ~BIT(i);
chan->phy->vchan = NULL;
chan->phy = NULL;
spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
i = __ffs(dint);
dint &= (dint - 1);
phy = &pdev->phys[i];
- if ((i < 32) && (legacy_reserved & BIT(i)))
- continue;
if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
- if (legacy_unavailable & (BIT(legacy_channel)))
- return -EBUSY;
- legacy_reserved ^= BIT(legacy_channel);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
module_platform_driver(pxad_driver);
MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..3c982c96b4b7 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
+#include <linux/msi.h>
#include "../dmaengine.h"
#include "hidma.h"
@@ -70,6 +71,7 @@
#define HIDMA_ERR_INFO_SW 0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC 10
+#define HIDMA_MSI_INTS 11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
return hidma_ll_inthandler(chirq, lldev);
}
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+ struct hidma_lldev **lldevp = arg;
+ struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+ return hidma_ll_inthandler_msi(chirq, *lldevp,
+ 1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
static ssize_t hidma_show_values(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
return strlen(buf);
}
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
- int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+ device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
struct device_attribute *attrs;
char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
GFP_KERNEL);
if (!attrs)
- return -ENOMEM;
+ return NULL;
name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
if (!name_copy)
- return -ENOMEM;
+ return NULL;
attrs->attr.name = name_copy;
attrs->attr.mode = mode;
attrs->show = hidma_show_values;
sysfs_attr_init(&attrs->attr);
- return device_create_file(dev->ddev.dev, attrs);
+ return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+ dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+ if (!dev->chid_attrs)
+ return -ENOMEM;
+
+ return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+ if (!desc->platform.msi_index) {
+ writel(msg->address_lo, dmadev->dev_evca + 0x118);
+ writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+ writel(msg->data, dmadev->dev_evca + 0x120);
+ }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device *dev = dmadev->ddev.dev;
+ struct msi_desc *desc;
+
+	/* free the MSI interrupts allocated for this device */
+ for_each_msi_entry(desc, dev)
+ devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+ platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ int rc;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+
+ rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
+ if (rc)
+ return rc;
+
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (!desc->platform.msi_index)
+ dmadev->msi_virqbase = desc->irq;
+
+ rc = devm_request_irq(&pdev->dev, desc->irq,
+ hidma_chirq_handler_msi,
+ 0, "qcom-hidma-msi",
+ &dmadev->lldev);
+ if (rc) {
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (rc) {
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(&pdev->dev, desc->irq,
+ &dmadev->lldev);
+ }
+	} else {
+		/* All vectors requested: switch the low-level driver to MSI mode */
+		hidma_ll_setup_irq(dmadev->lldev, true);
+	}
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const char *of_compat;
+ int ret = -EINVAL;
+
+ if (!adev || acpi_disabled) {
+ ret = device_property_read_string(dev, "compatible",
+ &of_compat);
+ if (ret)
+ return false;
+
+ ret = strcmp(of_compat, "qcom,hidma-1.1");
+ } else {
+#ifdef CONFIG_ACPI
+ ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+ }
+ return ret == 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
void __iomem *evca;
void __iomem *trca;
int rc;
+ bool msi;
pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->ddev.device_terminate_all = hidma_terminate_all;
dmadev->ddev.copy_align = 8;
+ /*
+ * Determine the MSI capability of the platform. Old HW doesn't
+ * support MSI.
+ */
+ msi = hidma_msi_capable(&pdev->dev);
+
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
goto dmafree;
}
- rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
- "qcom-hidma", dmadev->lldev);
- if (rc)
- goto uninit;
+ platform_set_drvdata(pdev, dmadev);
+ if (msi)
+ rc = hidma_request_msi(dmadev, pdev);
+
+ if (!msi || rc) {
+ hidma_ll_setup_irq(dmadev->lldev, false);
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+ 0, "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+ }
INIT_LIST_HEAD(&dmadev->ddev.channels);
rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->irq = chirq;
tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
hidma_debug_init(dmadev);
- hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+ hidma_sysfs_init(dmadev);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
- platform_set_drvdata(pdev, dmadev);
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return 0;
uninit:
+ if (msi)
+ hidma_free_msis(dmadev);
+
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
- devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ if (!dmadev->lldev->msi_support)
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ else
+ hidma_free_msis(dmadev);
+
tasklet_kill(&dmadev->task);
+ hidma_sysfs_uninit(dmadev);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
+ {"QCOM8062"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
+ {.compatible = "qcom,hidma-1.1",},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e20716303..c7d014235c32 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
};
struct hidma_lldev {
+ bool msi_support; /* flag indicating MSI support */
bool initialized; /* initialized flag */
u8 trch_state; /* trch_state of the device */
u8 evch_state; /* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
void __iomem *evca; /* Event Channel address */
struct hidma_tre
**pending_tre_list; /* Pointers to pending TREs */
- s32 pending_tre_count; /* Number of TREs pending */
+ atomic_t pending_tre_count; /* Number of TREs pending */
void *tre_ring; /* TRE ring */
dma_addr_t tre_dma; /* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
int irq;
int chidx;
u32 nr_descriptors;
+ int msi_virqbase;
struct hidma_lldev *lldev;
void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
struct dentry *debugfs;
struct dentry *stats;
+ /* sysfs entry for the channel id */
+ struct device_attribute *chid_attrs;
+
/* Task delivering issue_pending */
struct tasklet_struct task;
};
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
void __iomem *trca, void __iomem *evca,
u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5ffd68..3bdcb8056a36 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
- seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+ seq_printf(s, "pending_tre_count=%d\n",
+ atomic_read(&lldev->pending_tre_count));
seq_printf(s, "evca=%p\n", lldev->evca);
seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
void hidma_debug_uninit(struct hidma_dev *dmadev)
{
debugfs_remove_recursive(dmadev->debugfs);
- debugfs_remove_recursive(dmadev->stats);
}
int hidma_debug_init(struct hidma_dev *dmadev)
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24c577b..6645bdf0d151 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
}
}
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
- u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
{
struct hidma_tre *tre;
unsigned long flags;
+ u32 tre_iterator;
spin_lock_irqsave(&lldev->lock, flags);
+
+ tre_iterator = lldev->tre_processed_off;
tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
if (!tre) {
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
* Keep track of pending TREs that SW is expecting to receive
* from HW. We got one now. Decrement our counter.
*/
- lldev->pending_tre_count--;
- if (lldev->pending_tre_count < 0) {
+ if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
dev_warn(lldev->dev, "tre count mismatch on completion");
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
}
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ lldev->tre_ring_size);
+ lldev->tre_processed_off = tre_iterator;
spin_unlock_irqrestore(&lldev->lock, flags);
tre->err_info = err_info;
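
The hunk above converts pending_tre_count to an atomic_t so the several paths
that read and update it stay consistent, and moves the ring-offset bookkeeping
under the existing lock. A hedged sketch of the clamp-at-zero pattern used for
the counter (names illustrative):

	#include <linux/types.h>
	#include <linux/atomic.h>

	/* Decrement a pending-work counter; if it would go negative, the
	 * hardware reported more completions than submissions, so clamp it. */
	static bool complete_one(atomic_t *pending)
	{
		if (atomic_dec_return(pending) < 0) {
			atomic_set(pending, 0);
			return false;	/* counter mismatch */
		}
		return true;
	}
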
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
u32 evre_ring_size = lldev->evre_ring_size;
- u32 tre_ring_size = lldev->tre_ring_size;
u32 err_info, err_code, evre_write_off;
- u32 tre_iterator, evre_iterator;
+ u32 evre_iterator;
u32 num_completed = 0;
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
- tre_iterator = lldev->tre_processed_off;
evre_iterator = lldev->evre_processed_off;
if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
err_code =
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
evre_ring_size);
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
evre_write_off =
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
num_completed++;
+
+		/*
+		 * An error interrupt might have arrived while we were
+		 * processing the completions above.
+		 */
+ if (!hidma_ll_isenabled(lldev))
+ break;
}
if (num_completed) {
u32 evre_read_off = (lldev->evre_processed_off +
HIDMA_EVRE_SIZE * num_completed);
- u32 tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
evre_read_off = evre_read_off % evre_ring_size;
- tre_read_off = tre_read_off % tre_ring_size;
-
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
/* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
lldev->evre_processed_off = evre_read_off;
}
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
u8 err_code)
{
- u32 tre_iterator;
- u32 tre_ring_size = lldev->tre_ring_size;
- int num_completed = 0;
- u32 tre_read_off;
-
- tre_iterator = lldev->tre_processed_off;
- while (lldev->pending_tre_count) {
- if (hidma_post_completed(lldev, tre_iterator, err_info,
- err_code))
+ while (atomic_read(&lldev->pending_tre_count)) {
+ if (hidma_post_completed(lldev, err_info, err_code))
break;
- HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
- tre_ring_size);
- num_completed++;
}
- tre_read_off = (lldev->tre_processed_off +
- HIDMA_TRE_SIZE * num_completed);
-
- tre_read_off = tre_read_off % tre_ring_size;
-
- /* record the last processed tre offset */
- lldev->tre_processed_off = tre_read_off;
}
static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
* requests traditionally to the destination, this concept does not apply
* here for this HW.
*/
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
- struct hidma_lldev *lldev = arg;
- u32 status;
- u32 enable;
- u32 cause;
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+	/* Driver completes the txn and notifies the client. */
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
/*
* Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
* read and write accessors are used for performance reasons due to
* interrupt delivery guarantees. Do not copy this code blindly and
* expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
*/
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
cause = status & enable;
while (cause) {
- if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, disabling...\n",
- cause);
-
- /* Clear out pending interrupts */
- writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
- /* No further submissions. */
- hidma_ll_disable(lldev);
-
- /* Driver completes the txn and intimates the client.*/
- hidma_cleanup_pending_tre(lldev, 0xFF,
- HIDMA_EVRE_STATUS_ERROR);
- goto out;
- }
-
- /*
- * Try to consume as many EVREs as possible.
- */
- hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ hidma_ll_int_handler_internal(lldev, cause);
/*
* Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
cause = status & enable;
}
-out:
+ return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+ struct hidma_lldev *lldev = arg;
+
+ hidma_ll_int_handler_internal(lldev, cause);
return IRQ_HANDLED;
}
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
tre->err_code = 0;
tre->err_info = 0;
tre->queued = 1;
- lldev->pending_tre_count++;
+ atomic_inc(&lldev->pending_tre_count);
lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
% lldev->tre_ring_size;
spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
u32 val;
int ret;
- val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
- lldev->evch_state = HIDMA_CH_STATE(val);
- val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
- lldev->trch_state = HIDMA_CH_STATE(val);
-
- /* already suspended by this OS */
- if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
- (lldev->evch_state == HIDMA_CH_SUSPENDED))
- return 0;
-
- /* already stopped by the manager */
- if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
- (lldev->evch_state == HIDMA_CH_STOPPED))
+ /* The channel needs to be in working state */
+ if (!hidma_ll_isenabled(lldev))
return 0;
val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
u32 val;
u32 nr_tres = lldev->nr_tres;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_processed_off = 0;
lldev->evre_processed_off = 0;
lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
writel(HIDMA_EVRE_SIZE * nr_tres,
lldev->evca + HIDMA_EVCA_RING_LEN_REG);
- /* support IRQ only for now */
+ /* configure interrupts */
+ hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+	return hidma_ll_enable(lldev);
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+ u32 val;
+
+ lldev->msi_support = msi;
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* support IRQ by default */
val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
val &= ~0xF;
- val |= 0x1;
+ if (!lldev->msi_support)
+ val = val | 0x1;
writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
/* clear all pending interrupts and enable them */
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
- return hidma_ll_enable(lldev);
}
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
tasklet_kill(&lldev->task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
- lldev->pending_tre_count = 0;
+ atomic_set(&lldev->pending_tre_count, 0);
lldev->tre_write_offset = 0;
rc = hidma_ll_reset(lldev);
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e466083..f847d32cc4b5 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
{"QCOM8060"},
{},
};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
+ of_node_get(child);
+ new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child);
-
+ /*
+ * It is assumed that calling of_msi_configure is safe on
+ * platforms with or without MSI support.
+ */
+ of_msi_configure(&new_pdev->dev, child);
+ of_node_put(child);
kfree(res);
res = NULL;
}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
- of_node_put(child);
}
#endif
platform_driver_register(&hidma_mgmt_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 3c579abbabb7..f04c4702d98b 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -289,16 +289,11 @@ static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata;
struct s3c24xx_dma_phy *phy = NULL;
unsigned long flags;
int i;
int ret;
- if (s3cchan->slave)
- cdata = &pdata->channels[s3cchan->id];
-
for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
phy = &s3cdma->phy_chans[i];
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2e441d0ccd79..4c357d475465 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+ struct rcar_dmac_chan_map *map = &rchan->map;
struct rcar_dmac_desc_page *page, *_page;
struct rcar_dmac_desc *desc;
LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
free_page((unsigned long)page);
}
+ /* Remove slave mapping if present. */
+ if (map->slave.xfer_size) {
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+ }
+
pm_runtime_put(chan->device->dev);
}
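
The rcar-dmac hunk above makes sure a slave register region mapped with
dma_map_resource() is unmapped when the channel resources are freed. A hedged
sketch of the pairing, with illustrative function and field names:

	#include <linux/dma-mapping.h>

	/* Tear down a slave mapping created earlier with dma_map_resource(),
	 * using the recorded transfer size as the "is mapped" marker. */
	static void slave_unmap_if_mapped(struct device *dev, dma_addr_t addr,
					  size_t size, enum dma_data_direction dir)
	{
		if (size)
			dma_unmap_resource(dev, addr, size, dir, 0);
	}
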
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc38cee0..72c649713ace 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
- struct usb_dmac_chan *uchan;
struct dma_chan *chan;
dma_cap_mask_t mask;
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
if (!chan)
return NULL;
- uchan = to_usb_dmac_chan(chan);
-
return chan;
}
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8f62edad51be..a0733ac3edb1 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
struct sirfsoc_dma_chan *schan;
int ch;
int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
schan = &sdma->channels[ch];
if (list_empty(&schan->active))
continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
save->ctrl[ch] = readl_relaxed(sdma->base +
ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f4848d..3056ce7f8c69 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
struct stm32_dma_chan *chan = devid;
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- u32 status, scr, sfcr;
+ u32 status, scr;
spin_lock(&chan->vchan.lock);
status = stm32_dma_irq_status(chan);
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst;
- dma_addr_t src_addr, dst_addr;
u32 dma_scr = 0;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- src_addr = chan->dma_sconfig.src_addr;
- dst_addr = chan->dma_sconfig.dst_addr;
switch (direction) {
case DMA_MEM_TO_DEV:
@@ -884,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
- u32 residue;
+ u32 residue = 0;
status = dma_cookie_status(c, cookie, state);
if ((status == DMA_COMPLETE) || (!state))
@@ -892,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
spin_lock_irqsave(&chan->vchan.lock, flags);
vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (cookie == chan->desc->vdesc.tx.cookie) {
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
residue = stm32_dma_desc_residue(chan, chan->desc,
chan->next_sg);
- } else if (vdesc) {
+ else if (vdesc)
residue = stm32_dma_desc_residue(chan,
to_stm32_dma_desc(vdesc), 0);
- } else {
- residue = 0;
- }
-
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -976,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
struct stm32_dma_chan *chan;
struct dma_chan *c;
- if (dma_spec->args_count < 3)
+ if (dma_spec->args_count < 4)
return NULL;
cfg.channel_id = dma_spec->args[0];
cfg.request_line = dma_spec->args[1];
cfg.stream_config = dma_spec->args[2];
- cfg.threshold = 0;
+ cfg.threshold = dma_spec->args[3];
if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
STM32_DMA_MAX_REQUEST_ID))
return NULL;
- if (dma_spec->args_count > 3)
- cfg.threshold = dma_spec->args[3];
-
chan = &dmadev->chan[cfg.channel_id];
c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 3f24aeb48c0e..2403475a37cf 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_am335x_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
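
Both ti-dma-crossbar hunks above fix a device-node reference leak on the error
path: of_parse_phandle() returns a node with its refcount raised, and every
exit path must drop it. A minimal sketch of the rule (function and property
names are illustrative):

	#include <linux/errno.h>
	#include <linux/of.h>

	static int lookup_master(struct device_node *np)
	{
		struct device_node *dma_node;

		dma_node = of_parse_phandle(np, "dma-masters", 0);
		if (!dma_node)
			return -ENODEV;

		/* ... validate and use dma_node ... */

		of_node_put(dma_node);	/* drop the reference on every path */
		return 0;
	}
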
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759d5ffc..380276d078b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
if (!ds)
return NULL;
- ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+ ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (!ds->desc_hw) {
dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
ds->desc_num = num;
return ds;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 6421cc3c7dc1..c5a5b91f37f0 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -35,7 +35,6 @@
#include <linux/uaccess.h>
#include "altera_edac.h"
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "altera_edac"
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index f14c24d5b140..496603d8f3d2 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -17,7 +17,7 @@
#include <linux/mmzone.h>
#include <linux/edac.h>
#include <asm/msr.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include "mce_amd.h"
#define amd64_debug(fmt, arg...) \
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 3a501b530e11..a7450275ad28 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define AMD76X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "amd76x_edac"
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 2b63f7c2d6d2..b5786cfded3a 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -29,7 +29,6 @@
#include <linux/pci_ids.h>
#include <asm/io.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "amd8111_edac.h"
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index a5c680561c73..8851c33d7d24 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -29,7 +29,6 @@
#include <linux/edac.h>
#include <linux/pci_ids.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "amd8131_edac.h"
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a9259b069dcd..bc1f3416400e 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -19,7 +19,7 @@
#include <asm/machdep.h>
#include <asm/cell-regs.h>
-#include "edac_core.h"
+#include "edac_module.h"
struct cell_edac_priv
{
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 682288ced4ac..837b62c4993d 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -27,7 +27,6 @@
#include <linux/platform_device.h>
#include <linux/gfp.h>
-#include "edac_core.h"
#include "edac_module.h"
#define CPC925_EDAC_REVISION " Ver: 1.0.0"
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index b2d71388172b..1a352cae1f52 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define E752X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e752x_edac"
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index ece3aef16bb1..67ef07aed923 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define E7XXX_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e7xxx_edac"
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a97900333e2d..65cf2b9355c4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -12,23 +12,20 @@
* 19 Jan 2007
*/
+#include <asm/page.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/init.h>
+#include <linux/spinlock.h>
#include <linux/sysctl.h>
-#include <linux/highmem.h>
#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_device.h"
#include "edac_module.h"
/* lock for the list: 'edac_device_list', manipulation of this list
@@ -50,21 +47,6 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
}
#endif /* CONFIG_EDAC_DEBUG */
-
-/*
- * edac_device_alloc_ctl_info()
- * Allocate a new edac device control info structure
- *
- * The control structure is allocated in complete chunk
- * from the OS. It is in turn sub allocated to the
- * various objects that compose the structure
- *
- * The structure has a 'nr_instance' array within itself.
- * Each instance represents a major component
- * Example: L1 cache and L2 cache are 2 instance components
- *
- * Within each instance is an array of 'nr_blocks' blockoffsets
- */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned sz_private,
char *edac_device_name, unsigned nr_instances,
@@ -244,11 +226,6 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
-/*
- * edac_device_free_ctl_info()
- * frees the memory allocated by the edac_device_alloc_ctl_info()
- * function
- */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
edac_device_unregister_sysfs_main_kobj(ctl_info);
@@ -460,12 +437,6 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_mod_work(&edac_dev->work, jiffs);
}
-/*
- * edac_device_alloc_index: Allocate a unique device index number
- *
- * Return:
- * allocated index number
- */
int edac_device_alloc_index(void)
{
static atomic_t device_indexes = ATOMIC_INIT(0);
@@ -474,17 +445,6 @@ int edac_device_alloc_index(void)
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
-/**
- * edac_device_add_device: Insert the 'edac_dev' structure into the
- * edac_device global list and create sysfs entries associated with
- * edac_device structure.
- * @edac_device: pointer to the edac_device structure to be added to the list
- * 'edac_device' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(0, "\n");
@@ -541,19 +501,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
-/**
- * edac_device_del_device:
- * Remove sysfs entries for specified edac_device structure and
- * then remove edac_device structure from global list
- *
- * @dev:
- * Pointer to 'struct device' representing edac_device
- * structure to remove.
- *
- * Return:
- * Pointer to removed edac_device structure,
- * OR NULL if device not found.
- */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
@@ -608,10 +555,6 @@ static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
return edac_dev->panic_on_ue;
}
-/*
- * edac_device_handle_ce
- * perform a common output and handling of an 'edac_dev' CE event
- */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg)
{
@@ -654,10 +597,6 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
-/*
- * edac_device_handle_ue
- * perform a common output and handling of an 'edac_dev' UE event
- */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg)
{
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_device.h
index 4861542163d7..1aaba74ae411 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_device.h
@@ -1,5 +1,5 @@
/*
- * Defines, structures, APIs for edac_core module
+ * Defines, structures, APIs for edac_device
*
* (C) 2007 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
@@ -15,86 +15,22 @@
* Refactored for multi-source files:
* Doug Thompson <norsk5@xmission.com>
*
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
*/
-#ifndef _EDAC_CORE_H_
-#define _EDAC_CORE_H_
+#ifndef _EDAC_DEVICE_H_
+#define _EDAC_DEVICE_H_
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/pci.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
#include <linux/kobject.h>
-#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
-#include <linux/edac.h>
-
-#define EDAC_DEVICE_NAME_LEN 31
-#define EDAC_ATTRIB_VALUE_LEN 15
-
-#if PAGE_SHIFT < 20
-#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
-#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
-#else /* PAGE_SHIFT > 20 */
-#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
-#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
-#endif
-
-#define edac_printk(level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix ": " fmt, ##arg)
-
-#define edac_mc_printk(mci, level, fmt, arg...) \
- printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
- printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_device_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
-
-#define edac_pci_printk(ctl, level, fmt, arg...) \
- printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
-/* prefixes for edac_printk() and edac_mc_printk() */
-#define EDAC_MC "MC"
-#define EDAC_PCI "PCI"
-#define EDAC_DEBUG "DEBUG"
-
-extern const char * const edac_mem_types[];
-
-#ifdef CONFIG_EDAC_DEBUG
-extern int edac_debug_level;
-
-#define edac_dbg(level, fmt, ...) \
-do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-
-#else /* !CONFIG_EDAC_DEBUG */
-
-#define edac_dbg(level, fmt, ...) \
-do { \
- if (0) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-
-#endif /* !CONFIG_EDAC_DEBUG */
-
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
- PCI_DEVICE_ID_ ## vend ## _ ## dev
-
-#define edac_dev_name(dev) (dev)->dev_name
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/*
* The following are the structures to provide for a generic
@@ -321,197 +257,64 @@ extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
-#ifdef CONFIG_PCI
-
-struct edac_pci_counter {
- atomic_t pe_count;
- atomic_t npe_count;
-};
-
-/*
- * Abstract edac_pci control info structure
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with
+ * edac_device structure.
+ *
+ * @edac_dev: pointer to the edac_device structure to be added to the
+ * global 'edac_device' list.
+ *
+ * Returns:
+ * 0 on success, or an error code on failure
*/
-struct edac_pci_ctl_info {
- /* for global list of edac_pci_ctl_info structs */
- struct list_head link;
-
- int pci_idx;
-
- struct bus_type *edac_subsys; /* pointer to subsystem */
-
- /* the internal state of this controller instance */
- int op_state;
- /* work struct for this instance */
- struct delayed_work work;
-
- /* pointer to edac polling checking routine:
- * If NOT NULL: points to polling check routine
- * If NULL: Then assumes INTERRUPT operation, where
- * MC driver will receive events
- */
- void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
-
- struct device *dev; /* pointer to device structure */
-
- const char *mod_name; /* module name */
- const char *ctl_name; /* edac controller name */
- const char *dev_name; /* pci/platform/etc... name */
-
- void *pvt_info; /* pointer to 'private driver' info */
-
- unsigned long start_time; /* edac_pci load start time (jiffies) */
-
- struct completion complete;
-
- /* sysfs top name under 'edac' directory
- * and instance name:
- * cpu/cpu0/...
- * cpu/cpu1/...
- * cpu/cpu2/...
- * ...
- */
- char name[EDAC_DEVICE_NAME_LEN + 1];
-
- /* Event counters for the this whole EDAC Device */
- struct edac_pci_counter counters;
-
- /* edac sysfs device control for the 'name'
- * device this structure controls
- */
- struct kobject kobj;
- struct completion kobj_complete;
-};
-
-#define to_edac_pci_ctl_work(w) \
- container_of(w, struct edac_pci_ctl_info,work)
-
-/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
- u8 mask)
-{
- if (mask != 0xff) {
- u8 buf;
-
- pci_read_config_byte(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_byte(pdev, offset, value);
-}
-
-/* write all or some bits in a word-register*/
-static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
- u16 value, u16 mask)
-{
- if (mask != 0xffff) {
- u16 buf;
-
- pci_read_config_word(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_word(pdev, offset, value);
-}
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-/*
- * pci_write_bits32
+/**
+ * edac_device_del_device:
+ * Remove sysfs entries for the specified edac_device structure and
+ * then remove the edac_device structure from the global list
*
- * edac local routine to do pci_write_config_dword, but adds
- * a mask parameter. If mask is all ones, ignore the mask.
- * Otherwise utilize the mask to isolate specified bits
+ * @dev:
+ * Pointer to struct &device representing the edac device
+ * structure to remove.
*
- * write all or some bits in a dword-register
+ * Returns:
+ * Pointer to removed edac_device structure,
+ * or %NULL if device not found.
*/
-static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
- u32 value, u32 mask)
-{
- if (mask != 0xffffffff) {
- u32 buf;
-
- pci_read_config_dword(pdev, offset, &buf);
- value &= mask;
- buf &= ~mask;
- value |= buf;
- }
-
- pci_write_config_dword(pdev, offset, value);
-}
-
-#endif /* CONFIG_PCI */
-
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
- struct edac_mc_layer *layers,
- unsigned sz_pvt);
-extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
- const struct attribute_group **groups);
-#define edac_mc_add_mc(mci) edac_mc_add_mc_with_groups(mci, NULL)
-extern void edac_mc_free(struct mem_ctl_info *mci);
-extern struct mem_ctl_info *edac_mc_find(int idx);
-extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
-extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
-extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page);
-
-void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- struct edac_raw_error_desc *e);
-
-void edac_mc_handle_error(const enum hw_event_mc_err_type type,
- struct mem_ctl_info *mci,
- const u16 error_count,
- const unsigned long page_frame_number,
- const unsigned long offset_in_page,
- const unsigned long syndrome,
- const int top_layer,
- const int mid_layer,
- const int low_layer,
- const char *msg,
- const char *other_detail);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
-/*
- * edac_device APIs
+/**
+ * edac_device_handle_ue():
+ * perform a common output and handling of an 'edac_dev' UE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
+ * @msg: message to be printed
*/
-extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
+/**
+ * edac_device_handle_ce():
+ * perform a common output and handling of an 'edac_dev' CE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the CE error happened
+ * @block_nr: number of the block where the CE error happened
+ * @msg: message to be printed
+ */
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
-extern int edac_device_alloc_index(void);
-extern const char *edac_layer_name[];
-/*
- * edac_pci APIs
- */
-extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name);
-
-extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
-
-extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
- unsigned long value);
-
-extern int edac_pci_alloc_index(void);
-extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
-extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
-
-extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
- struct device *dev,
- const char *mod_name);
-
-extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
-extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
-extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
-
-/*
- * edac misc APIs
+/**
+ * edac_device_alloc_index: Allocate a unique device index number
+ *
+ * Returns:
+ * allocated index number
*/
-extern char *edac_op_state_to_string(int op_state);
+extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
-#endif /* _EDAC_CORE_H_ */
+#endif
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 93da1a45c716..0e7ea3591b78 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,7 +1,7 @@
/*
* file for managing the edac_device subsystem of devices for EDAC
*
- * (C) 2007 SoftwareBitMaker
+ * (C) 2007 SoftwareBitMaker
*
* This file may be distributed under the terms of the
* GNU General Public License.
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_device.h"
#include "edac_module.h"
#define EDAC_DEVICE_SYMLINK "device"
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d2ea9c4f1824..750891ea07de 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -28,9 +28,9 @@
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>
@@ -239,30 +239,6 @@ static void _edac_mc_free(struct mem_ctl_info *mci)
kfree(mci);
}
-/**
- * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
- * @mc_num: Memory controller number
- * @n_layers: Number of MC hierarchy layers
- * layers: Describes each layer as seen by the Memory Controller
- * @size_pvt: size of private storage needed
- *
- *
- * Everything is kmalloc'ed as one big chunk - more efficient.
- * Only can be used if all structures have the same lifetime - otherwise
- * you have to allocate and initialize your own structures.
- *
- * Use edac_mc_free() to free mc structures allocated by this function.
- *
- * NOTE: drivers handle multi-rank memories in different ways: in some
- * drivers, one multi-rank memory stick is mapped as one entry, while, in
- * others, a single multi-rank memory stick would be mapped into several
- * entries. Currently, this function will allocate multiple struct dimm_info
- * on such scenarios, as grouping the multiple ranks require drivers change.
- *
- * Returns:
- * On failure: NULL
- * On success: struct mem_ctl_info pointer
- */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned n_layers,
struct edac_mc_layer *layers,
@@ -460,11 +436,6 @@ error:
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
-/**
- * edac_mc_free
- * 'Free' a previously allocated 'mci' structure
- * @mci: pointer to a struct mem_ctl_info structure
- */
void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
@@ -646,12 +617,6 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
return handlers;
}
-/**
- * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- */
struct mem_ctl_info *edac_mc_find(int idx)
{
struct mem_ctl_info *mci = NULL;
@@ -676,16 +641,6 @@ unlock:
}
EXPORT_SYMBOL(edac_mc_find);
-/**
- * edac_mc_add_mc_with_groups: Insert the 'mci' structure into the mci
- * global list and create sysfs entries associated with mci structure
- * @mci: pointer to the mci structure to be added to the list
- * @groups: optional attribute groups for the driver-specific sysfs entries
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
@@ -776,13 +731,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
-/**
- * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
- * remove mci structure from global list
- * @pdev: Pointer to 'struct device' representing mci structure to remove.
- *
- * Return pointer to removed mci structure, or NULL if device not found.
- */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
struct mem_ctl_info *mci;
@@ -1046,18 +994,6 @@ static void edac_ue_error(struct mem_ctl_info *mci,
edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
-/**
- * edac_raw_mc_handle_error - reports a memory event to userspace without doing
- * anything to discover the error location
- *
- * @type: severity of the error (CE/UE/Fatal)
- * @mci: a struct mem_ctl_info pointer
- * @e: error description
- *
- * This raw function is used internally by edac_mc_handle_error(). It should
- * only be called directly when the hardware error come directly from BIOS,
- * like in the case of APEI GHES driver.
- */
void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
struct edac_raw_error_desc *e)
@@ -1087,24 +1023,6 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
-/**
- * edac_mc_handle_error - reports a memory event to userspace
- *
- * @type: severity of the error (CE/UE/Fatal)
- * @mci: a struct mem_ctl_info pointer
- * @error_count: Number of errors of the same type
- * @page_frame_number: mem page where the error occurred
- * @offset_in_page: offset of the error inside the page
- * @syndrome: ECC syndrome
- * @top_layer: Memory layer[0] position
- * @mid_layer: Memory layer[1] position
- * @low_layer: Memory layer[2] position
- * @msg: Message meaningful to the end users that
- * explains the event
- * @other_detail: Technical details about the event that
- * may help hardware manufacturers and
- * EDAC developers to analyse the event
- */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
const u16 error_count,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 000000000000..50fc1dc9c0d8
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,245 @@
+/*
+ * Defines, structures, APIs for edac_mc module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/edac.h>
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+#else /* PAGE_SHIFT > 20 */
+#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
+#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
+#endif
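/*
 * Illustrative note (not part of this patch): with the common 4 KiB pages
 * (PAGE_SHIFT == 12) the first branch above is used and the conversion
 * shifts by 20 - 12 = 8 bits, e.g.:
 *
 *        PAGES_TO_MiB(256) == 256 >> 8 == 1
 *        MiB_TO_PAGES(1)   == 1 << 8   == 256
 */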
+
+#define edac_printk(level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_device_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
+
+extern const char * const edac_mem_types[];
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else /* !CONFIG_EDAC_DEBUG */
+
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (0) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* !CONFIG_EDAC_DEBUG */
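/*
 * Illustrative note (not part of this patch): a call such as
 *
 *        edac_dbg(1, "MC%d: polling\n", mci->mc_idx);
 *
 * is only printed when the kernel is built with CONFIG_EDAC_DEBUG and
 * edac_debug_level is at least 1; otherwise the "if (0)" variant above lets
 * the compiler discard it while still type-checking the arguments.
 */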
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+ PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+#define edac_dev_name(dev) (dev)->dev_name
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+/**
+ * edac_mc_alloc() - Allocate and partially fill a struct &mem_ctl_info.
+ *
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: size of private storage needed
+ *
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * It can only be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * .. note::
+ *
+ * drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks would require changes
+ * to the drivers.
+ *
+ * Returns:
+ * On success, a pointer to the allocated struct mem_ctl_info;
+ * %NULL otherwise.
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
+
+/**
+ * edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
+ * global list and create sysfs entries associated with @mci structure.
+ *
+ * @mci: pointer to the mci structure to be added to the list
+ * @groups: optional attribute groups for the driver-specific sysfs entries
+ *
+ * Returns:
+ * 0 on Success, or an error code on failure
+ */
+extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
+ const struct attribute_group **groups);
+#define edac_mc_add_mc(mci) edac_mc_add_mc_with_groups(mci, NULL)
+
+/**
+ * edac_mc_free() - Frees a previously allocated @mci structure
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ */
+extern void edac_mc_free(struct mem_ctl_info *mci);
+
+/**
+ * edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx.
+ *
+ * @idx: index of the mem_ctl_info structure to search for
+ *
+ * Returns: a pointer to the structure if found; %NULL otherwise.
+ */
+extern struct mem_ctl_info *edac_mc_find(int idx);
+
+/**
+ * find_mci_by_dev() - Scan list of controllers looking for the one that
+ * manages the @dev device.
+ *
+ * @dev: pointer to the struct device associated with the MCI
+ *
+ * Returns: a pointer to struct &mem_ctl_info on success;
+ * %NULL otherwise.
+ */
+extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
+
+/**
+ * edac_mc_del_mc() - Remove sysfs entries for mci structure associated with
+ * @dev and remove mci structure from global list.
+ *
+ * @dev: Pointer to struct &device representing mci structure to remove.
+ *
+ * Returns: pointer to removed mci structure, or %NULL if device not found.
+ */
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
+
+/**
+ * edac_mc_find_csrow_by_page() - Ancillary routine to identify which csrow
+ * contains a memory page.
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ * @page: memory page to find
+ *
+ * Returns: the csrow index on success; -1 if not found.
+ */
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page);
+
+/**
+ * edac_raw_mc_handle_error() - Reports a memory event to userspace without
+ * doing anything to discover the error location.
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @e: error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error comes directly from the
+ * BIOS, as in the case of the APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ struct edac_raw_error_desc *e);
+
+/**
+ * edac_mc_handle_error() - Reports a memory event to userspace.
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @error_count: Number of errors of the same type
+ * @page_frame_number: mem page where the error occurred
+ * @offset_in_page: offset of the error inside the page
+ * @syndrome: ECC syndrome
+ * @top_layer: Memory layer[0] position
+ * @mid_layer: Memory layer[1] position
+ * @low_layer: Memory layer[2] position
+ * @msg: Message meaningful to the end users that
+ * explains the event
+ * @other_detail: Technical details about the event that
+ * may help hardware manufacturers and
+ * EDAC developers to analyse the event
+ */
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const u16 error_count,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int top_layer,
+ const int mid_layer,
+ const int low_layer,
+ const char *msg,
+ const char *other_detail);
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif /* _EDAC_MC_H_ */
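Taken together, the declarations above describe the whole life cycle a memory-controller driver goes through. The sketch below is an editorial illustration only, not part of this patch: the device, layer geometry and error location are invented, and struct edac_mc_layer plus the HW_EVENT_* values come from <linux/edac.h>.

/* Editorial sketch: allocate, register, report and tear down an MC. */
static int example_mc_probe(struct device *dev)
{
        struct edac_mc_layer layers[2];
        struct mem_ctl_info *mci;

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 4;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 2;
        layers[1].is_virt_csrow = false;

        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                return -ENOMEM;

        mci->pdev = dev;                /* device that owns this MC */
        mci->mod_name = "example_edac";
        mci->ctl_name = "example_mc";

        if (edac_mc_add_mc(mci)) {      /* creates the sysfs entries */
                edac_mc_free(mci);
                return -ENODEV;
        }
        return 0;
}

/* Report one corrected error at a (hypothetical) decoded location. */
static void example_report_ce(struct mem_ctl_info *mci)
{
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             0x1234, 0x80, 0,   /* page, offset, syndrome */
                             0, 1, -1,          /* csrow 0, channel 1 */
                             "single-bit error", "");
}

static void example_mc_remove(struct device *dev)
{
        struct mem_ctl_info *mci = edac_mc_del_mc(dev);

        if (mci)
                edac_mc_free(mci);
}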
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4e0f8e720ad9..39dbab7d62f1 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -19,7 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
/* MC EDAC Controls, setable by module parameter, and sysfs */
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5f8543be995a..172598a27d7d 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -12,7 +12,7 @@
*/
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_mc.h"
#include "edac_module.h"
#define EDAC_VERSION "Ver: 3.0.0"
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cfaacb99c973..014871e169cc 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,7 +10,9 @@
#ifndef __EDAC_MODULE_H__
#define __EDAC_MODULE_H__
-#include "edac_core.h"
+#include "edac_mc.h"
+#include "edac_pci.h"
+#include "edac_device.h"
/*
* INTERNAL EDAC MODULE:
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 8f2f2899a7a2..48c844a72a27 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -9,35 +9,25 @@
* or implied.
*
*/
+#include <asm/page.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/init.h>
+#include <linux/spinlock.h>
#include <linux/sysctl.h>
-#include <linux/highmem.h>
#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include "edac_core.h"
+#include "edac_pci.h"
#include "edac_module.h"
static DEFINE_MUTEX(edac_pci_ctls_mutex);
static LIST_HEAD(edac_pci_list);
static atomic_t pci_indexes = ATOMIC_INIT(0);
-/*
- * edac_pci_alloc_ctl_info
- *
- * The alloc() function for the 'edac_pci' control info
- * structure. The chip driver will allocate one of these for each
- * edac_pci it is going to control/register with the EDAC CORE.
- */
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
@@ -68,16 +58,6 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
-/*
- * edac_pci_free_ctl_info()
- *
- * Last action on the pci control structure.
- *
- * call the remove sysfs information, which will unregister
- * this control struct's kobj. When that kobj's ref count
- * goes to zero, its release function will be call and then
- * kfree() the memory.
- */
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
edac_dbg(1, "\n");
@@ -215,31 +195,12 @@ static void edac_pci_workq_function(struct work_struct *work_req)
mutex_unlock(&edac_pci_ctls_mutex);
}
-/*
- * edac_pci_alloc_index: Allocate a unique PCI index number
- *
- * Return:
- * allocated index number
- *
- */
int edac_pci_alloc_index(void)
{
return atomic_inc_return(&pci_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
-/*
- * edac_pci_add_device: Insert the 'edac_dev' structure into the
- * edac_pci global list and create sysfs entries associated with
- * edac_pci structure.
- * @pci: pointer to the edac_device structure to be added to the list
- * @edac_idx: A unique numeric identifier to be assigned to the
- * 'edac_pci' structure.
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
{
edac_dbg(0, "\n");
@@ -285,19 +246,6 @@ fail0:
}
EXPORT_SYMBOL_GPL(edac_pci_add_device);
-/*
- * edac_pci_del_device()
- * Remove sysfs entries for specified edac_pci structure and
- * then remove edac_pci structure from global list
- *
- * @dev:
- * Pointer to 'struct device' representing edac_pci structure
- * to remove
- *
- * Return:
- * Pointer to removed edac_pci structure,
- * or NULL if device not found
- */
struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
{
struct edac_pci_ctl_info *pci;
@@ -351,17 +299,6 @@ struct edac_pci_gen_data {
int edac_idx;
};
-/*
- * edac_pci_create_generic_ctl
- *
- * A generic constructor for a PCI parity polling device
- * Some systems have more than one domain of PCI busses.
- * For systems with one domain, then this API will
- * provide for a generic poller.
- *
- * This routine calls the edac_pci_alloc_ctl_info() for
- * the generic device, with default values
- */
struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
const char *mod_name)
{
@@ -394,11 +331,6 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
}
EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
-/*
- * edac_pci_release_generic_ctl
- *
- * The release function of a generic EDAC PCI polling device
- */
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "pci mod=%s\n", pci->mod_name);
diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h
new file mode 100644
index 000000000000..5175f5724cfa
--- /dev/null
+++ b/drivers/edac/edac_pci.h
@@ -0,0 +1,271 @@
+/*
+ * Defines, structures, APIs for edac_pci and edac_pci_sysfs
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_PCI_H_
+#define _EDAC_PCI_H_
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+ atomic_t pe_count;
+ atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+ /* for global list of edac_pci_ctl_info structs */
+ struct list_head link;
+
+ int pci_idx;
+
+ struct bus_type *edac_subsys; /* pointer to subsystem */
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_pci load start time (jiffies) */
+
+ struct completion complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ /* Event counters for this whole EDAC device */
+ struct edac_pci_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
+};
+
+#define to_edac_pci_ctl_work(w) \
+ container_of(w, struct edac_pci_ctl_info,work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+ u8 mask)
+{
+ if (mask != 0xff) {
+ u8 buf;
+
+ pci_read_config_byte(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_byte(pdev, offset, value);
+}
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+ u16 value, u16 mask)
+{
+ if (mask != 0xffff) {
+ u16 buf;
+
+ pci_read_config_word(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_word(pdev, offset, value);
+}
+
+/*
+ * pci_write_bits32
+ *
+ * edac local routine to do pci_write_config_dword, but adds
+ * a mask parameter. If mask is all ones, ignore the mask.
+ * Otherwise utilize the mask to isolate specified bits
+ *
+ * write all or some bits in a dword-register
+ */
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+ u32 value, u32 mask)
+{
+ if (mask != 0xffffffff) {
+ u32 buf;
+
+ pci_read_config_dword(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+
+ pci_write_config_dword(pdev, offset, value);
+}
+
+#endif /* CONFIG_PCI */
+
+/*
+ * edac_pci APIs
+ */
+
+/**
+ * edac_pci_alloc_ctl_info:
+ * The alloc() function for the 'edac_pci' control info
+ * structure.
+ *
+ * @sz_pvt: size of the private info at struct &edac_pci_ctl_info
+ * @edac_pci_name: name of the PCI device
+ *
+ * The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ *
+ * Returns: a pointer to struct &edac_pci_ctl_info on success; %NULL otherwise.
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name);
+
+/**
+ * edac_pci_free_ctl_info():
+ * Last action on the pci control structure.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ *
+ * Removes the sysfs information, which unregisters this control
+ * struct's kobj. When that kobj's ref count goes to zero, its
+ * release function is called, which then kfree()s the memory.
+ */
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_alloc_index: Allocate a unique PCI index number
+ *
+ * Returns:
+ * allocated index number
+ *
+ */
+extern int edac_pci_alloc_index(void);
+
+/**
+ * edac_pci_add_device(): Insert the 'edac_pci' structure into the
+ * edac_pci global list and create sysfs entries associated with the
+ * edac_pci structure.
+ *
+ * @pci: pointer to the edac_pci structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Returns:
+ * 0 on Success, or an error code on failure
+ */
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+
+/**
+ * edac_pci_del_device()
+ * Remove sysfs entries for specified edac_pci structure and
+ * then remove edac_pci structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_pci structure
+ * to remove
+ *
+ * Returns:
+ * Pointer to removed edac_pci structure,
+ * or %NULL if device not found
+ */
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+/**
+ * edac_pci_create_generic_ctl()
+ * A generic constructor for a PCI parity polling device.
+ * Some systems have more than one domain of PCI busses.
+ * For systems with a single domain, this API provides
+ * a generic poller.
+ *
+ * @dev: pointer to struct &device;
+ * @mod_name: name of the PCI device
+ *
+ * This routine calls edac_pci_alloc_ctl_info() for
+ * the generic device, with default values.
+ *
+ * Returns: Pointer to struct &edac_pci_ctl_info on success, %NULL on
+ * failure.
+ */
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+ struct device *dev,
+ const char *mod_name);
+
+/**
+ * edac_pci_release_generic_ctl
+ * The release function of a generic EDAC PCI polling device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_create_sysfs
+ * Create the controls/attributes for the specified EDAC PCI device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_remove_sysfs()
+ * Remove the controls and attributes for this EDAC PCI device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+
+#endif
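For the PCI side, the generic poller constructor plus the masked config-space helpers above cover the common case. The sketch below is an editorial illustration only, not part of this patch; the device, module name and register offset are hypothetical.

/* Editorial sketch: set up and tear down a generic EDAC PCI poller. */
static struct edac_pci_ctl_info *example_pci;

static int example_setup_pci_edac(struct pci_dev *pdev)
{
        /*
         * Read-modify-write helper: only the two bits set in the mask are
         * rewritten in the (hypothetical) error-control register at 0x48;
         * every other bit keeps its current value.
         */
        pci_write_bits32(pdev, 0x48, 0x3, 0x3);

        /* Register a generic parity poller for this device. */
        example_pci = edac_pci_create_generic_ctl(&pdev->dev, "example_edac");
        return example_pci ? 0 : -ENODEV;
}

static void example_teardown_pci_edac(void)
{
        if (example_pci)
                edac_pci_release_generic_ctl(example_pci);
}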
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 6e3428ba400f..72c9eb9fdffb 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -11,7 +11,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
-#include "edac_core.h"
+#include "edac_pci.h"
#include "edac_module.h"
#define EDAC_PCI_SYMLINK "device"
@@ -418,12 +418,6 @@ static void edac_pci_main_kobj_teardown(void)
}
}
-/*
- *
- * edac_pci_create_sysfs
- *
- * Create the controls/attributes for the specified EDAC PCI device
- */
int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
{
int err;
@@ -459,11 +453,6 @@ unregister_cleanup:
return err;
}
-/*
- * edac_pci_remove_sysfs
- *
- * remove the controls and attributes for this EDAC PCI device
- */
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
edac_dbg(0, "index=%d\n", pci->pci_idx);
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index 9774f52f0c3e..4e9608a958e7 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -28,7 +28,6 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include "edac_module.h"
-#include "edac_core.h"
#include "fsl_ddr_edac.h"
#define EDAC_MOD_STR "fsl_ddr_edac"
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index e3fa4390f846..4e61a6229dd2 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -14,7 +14,7 @@
#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include <ras/ras_event.h>
#define GHES_EDAC_REVISION " Ver: 1.0.0"
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 2f193668ebc7..cd9a2bb7c548 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include "edac_core.h"
#include "edac_module.h"
#define SR_CLR_SB_ECC_INTR 0x0
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 11260cc3360e..0e7e0a404d89 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -22,7 +22,6 @@
#include <linux/of_platform.h>
#include <linux/uaccess.h>
-#include "edac_core.h"
#include "edac_module.h"
/* DDR Ctrlr Error Registers */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5cb36a6022cc..5306240570d7 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I3000_REVISION "1.1"
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 1f453382258a..77c58d201a30 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -13,7 +13,7 @@
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include <linux/io-64-nonatomic-lo-hi.h>
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 72e07e3cf718..1670d27bcac8 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -22,7 +22,7 @@
#include <linux/edac.h>
#include <asm/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I5000 module when modifications are made
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index c655162caf08..a8334c4acea7 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -29,7 +29,6 @@
#include <linux/mmzone.h>
#include <linux/debugfs.h>
-#include "edac_core.h"
#include "edac_module.h"
/* register addresses */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6ef6ad1ba16e..abf6ef22e220 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -32,7 +32,7 @@
#include <linux/edac.h>
#include <linux/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I5400 module when modifications are made
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index dcac982fdc7a..0a912bf6de00 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -26,7 +26,7 @@
#include <linux/edac.h>
#include <linux/mmzone.h>
-#include "edac_core.h"
+#include "edac_module.h"
/*
* Alter this version for the I7300 module when modifications are made
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e943ea..69b5adead0ad 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -39,7 +39,7 @@
#include <asm/processor.h>
#include <asm/div64.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Static vars */
static LIST_HEAD(i7core_edac_list);
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 4d4110364f02..cb61a5b7d080 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -29,7 +29,7 @@
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82443_REVISION "0.1"
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index ee1078cd3b96..236c813227fc 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82860_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82860_edac"
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index c26a513f8869..e286b7e74c7a 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,7 +18,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82875P_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82875p_edac"
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 35ab66c623a3..7baa8ace267b 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define I82975X_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i82975x_edac"
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 1c88d9707495..2733fb5938a4 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -41,7 +41,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define IE31200_REVISION "1.0"
#define EDAC_MOD_STR "ie31200_edac"
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 6c59d897ad12..94cac7686a56 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -16,7 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include "edac_core.h"
+#include "edac_module.h"
#include "fsl_ddr_edac.h"
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index c62602141f95..8f66cbed70b7 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -25,7 +25,6 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "edac_module.h"
-#include "edac_core.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index cb9b8577acbc..14b7e7b71eaa 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -17,7 +17,6 @@
#include <linux/edac.h>
#include <linux/gfp.h>
-#include "edac_core.h"
#include "edac_module.h"
#include "mv64x60_edac.h"
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index afea7fc625cc..c33059e9b0be 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -16,7 +16,6 @@
#include <asm/octeon/cvmx.h>
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "octeon-l2c"
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index cda6dab5067a..9c1ffe3e912b 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -19,7 +19,6 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
-#include "edac_core.h"
#include "edac_module.h"
#define OCTEON_MAX_MC 4
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 2ab6cf24c959..754eced59c32 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/edac.h>
-#include "edac_core.h"
#include "edac_module.h"
#include <asm/octeon/cvmx.h>
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 9ca73cec74e7..28b238eecefc 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -18,7 +18,6 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/octeon.h>
-#include "edac_core.h"
#include "edac_module.h"
static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 9c971b575530..199f2c80480d 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -26,7 +26,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define MODULE_NAME "pasemi_edac"
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 691ce25e9010..e55e92590106 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -21,7 +21,7 @@
#include <asm/dcr.h>
-#include "edac_core.h"
+#include "edac_module.h"
#include "ppc4xx_edac.h"
/*
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 8f936bc7a010..978916625ced 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -20,7 +20,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define R82600_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "r82600_edac"
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index c1ad0eb7d5dd..54ae6dc45ab2 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -27,7 +27,7 @@
#include <asm/processor.h>
#include <asm/mce.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 9edcb29b3001..79ef675e4d6f 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -29,7 +29,7 @@
#include <asm/processor.h>
#include <asm/mce.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define SKX_REVISION " Ver: 1.0 "
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index fc153aea2f6c..1c01dec78ec3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "edac_core.h"
+#include "edac_module.h"
/* Number of cs_rows needed per memory controller */
#define SYNPS_EDAC_NR_CSROWS 1
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 71381642ce2a..8a33a87e67f1 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -30,7 +30,7 @@
#include <hv/hypervisor.h>
#include <hv/drv_mshim_intf.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define DRV_NAME "tile-edac"
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 314cf5cf268c..03c97a4bf590 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -16,7 +16,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
#define X38_REVISION "1.1"
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 5569391ea800..6c270d9d304a 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -28,7 +28,6 @@
#include <linux/of_address.h>
#include <linux/regmap.h>
-#include "edac_core.h"
#include "edac_module.h"
#define EDAC_MOD_STR "xgene_edac"
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..d836d4ce5ee4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
int ret;
- ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ ret = snd_soc_component_force_enable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
struct arizona *arizona = info->arizona;
const char *widget = arizona_extcon_get_micbias(info);
struct snd_soc_dapm_context *dapm = arizona->dapm;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
bool change;
int ret;
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- ret = snd_soc_dapm_disable_pin(dapm, widget);
+ ret = snd_soc_component_disable_pin(component, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 78298460d168..7c1e3a7b14e0 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
dev_err(&edev->dev, "out of memory in extcon_set_state\n");
kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ return -ENOMEM;
}
length = name_show(&edev->dev, NULL, prop_buf);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d42c74..1867f0d1389b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -8,6 +8,17 @@ menu "Firmware Drivers"
config ARM_PSCI_FW
bool
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
+
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
depends on MAILBOX
@@ -203,6 +214,21 @@ config QCOM_SCM_64
def_bool y
depends on QCOM_SCM && ARM64
+config TI_SCI_PROTOCOL
+ tristate "TI System Control Interface (TISCI) Message Protocol"
+ depends on TI_MESSAGE_MANAGER
+ help
+ The TI System Control Interface (TISCI) Message Protocol is used to
+ manage compute systems such as ARM, DSP, etc. with the system
+ controller in complex System on Chip (SoC) designs such as those
+ found on certain Keystone-generation SoCs from TI.
+
+ The system controller provides various facilities, including power
+ management support.
+
+ This protocol library is used by client drivers to use the features
+ provided by the system controller.
+
config HAVE_ARM_SMCCC
bool
@@ -210,5 +236,6 @@ source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41fa8b3..a37f12e8d137 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,6 +2,7 @@
# Makefile for the linux kernel.
#
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_DMI) += dmi_scan.o
@@ -20,9 +21,11 @@ obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += tegra/
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ce2bc2a38101..9ad0b1934be9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -50,20 +50,27 @@
#define CMD_TOKEN_ID_MASK 0xff
#define CMD_DATA_SIZE_SHIFT 16
#define CMD_DATA_SIZE_MASK 0x1ff
+#define CMD_LEGACY_DATA_SIZE_SHIFT 20
+#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token) \
((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
+ ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
+ (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT))
#define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
+#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \
+ CMD_LEGACY_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
#define SCPI_SLOT 0
#define MAX_DVFS_DOMAINS 8
-#define MAX_DVFS_OPPS 8
+#define MAX_DVFS_OPPS 16
#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
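/*
 * Illustrative note (not part of this patch): the legacy protocol packs the
 * payload size at bit 20 instead of bit 16. Assuming the command ID sits in
 * the low bits (CMD_ID_SHIFT == 0, as in the rest of this file), e.g. for
 * command 0x0e with a 4-byte payload:
 *
 *        PACK_SCPI_CMD(0x0e, 4)        == 0x0e | (4 << 16) == 0x0004000e
 *        PACK_LEGACY_SCPI_CMD(0x0e, 4) == 0x0e | (4 << 20) == 0x0040000e
 *
 * which is why CMD_SIZE() and CMD_LEGACY_SIZE() extract the length from
 * different bit positions.
 */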
@@ -99,6 +106,7 @@ enum scpi_error_codes {
SCPI_ERR_MAX
};
+/* SCPI Standard commands */
enum scpi_std_cmd {
SCPI_CMD_INVALID = 0x00,
SCPI_CMD_SCPI_READY = 0x01,
@@ -132,6 +140,108 @@ enum scpi_std_cmd {
SCPI_CMD_COUNT
};
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+ LEGACY_SCPI_CMD_INVALID = 0x00,
+ LEGACY_SCPI_CMD_SCPI_READY = 0x01,
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ LEGACY_SCPI_CMD_EVENT = 0x03,
+ LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
+ LEGACY_SCPI_CMD_L2_READY = 0x09,
+ LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
+ LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
+ LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
+ LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
+ LEGACY_SCPI_CMD_SET_RTC = 0x11,
+ LEGACY_SCPI_CMD_GET_RTC = 0x12,
+ LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ LEGACY_SCPI_CMD_SET_PSU = 0x18,
+ LEGACY_SCPI_CMD_GET_PSU = 0x19,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
+ LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_RTC,
+ LEGACY_SCPI_CMD_GET_RTC,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_PSU,
+ LEGACY_SCPI_CMD_GET_PSU,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
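/*
 * Illustrative note (not part of this patch): at probe time each command in
 * this table is set in the cmd_priority bitmap, and scpi_send_message()
 * (further down in this patch) then routes those commands through mailbox
 * channel 1 (the high-priority link) while all other legacy commands use
 * channel 0.
 */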
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+ CMD_SCPI_CAPABILITIES = 0,
+ CMD_GET_CLOCK_INFO,
+ CMD_GET_CLOCK_VALUE,
+ CMD_SET_CLOCK_VALUE,
+ CMD_GET_DVFS,
+ CMD_SET_DVFS,
+ CMD_GET_DVFS_INFO,
+ CMD_SENSOR_CAPABILITIES,
+ CMD_SENSOR_INFO,
+ CMD_SENSOR_VALUE,
+ CMD_SET_DEVICE_PWR_STATE,
+ CMD_GET_DEVICE_PWR_STATE,
+ CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+ SCPI_CMD_SCPI_CAPABILITIES,
+ SCPI_CMD_GET_CLOCK_INFO,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS_INFO,
+ SCPI_CMD_SENSOR_CAPABILITIES,
+ SCPI_CMD_SENSOR_INFO,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SET_DEVICE_PWR_STATE,
+ SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+ -1, /* GET_CLOCK_INFO */
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+ LEGACY_SCPI_CMD_SENSOR_INFO,
+ LEGACY_SCPI_CMD_SENSOR_VALUE,
+ -1, /* SET_DEVICE_PWR_STATE */
+ -1, /* GET_DEVICE_PWR_STATE */
+};
+
struct scpi_xfer {
u32 slot; /* has to be first element */
u32 cmd;
@@ -160,7 +270,10 @@ struct scpi_chan {
struct scpi_drvinfo {
u32 protocol_version;
u32 firmware_version;
+ bool is_legacy;
int num_chans;
+ int *commands;
+ DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
atomic_t next_chan;
struct scpi_ops *scpi_ops;
struct scpi_chan *channels;
@@ -177,6 +290,11 @@ struct scpi_shared_mem {
u8 payload[0];
} __packed;
+struct legacy_scpi_shared_mem {
+ __le32 status;
+ u8 payload[0];
+} __packed;
+
struct scp_capabilities {
__le32 protocol_version;
__le32 event_version;
@@ -202,6 +320,12 @@ struct clk_set_value {
__le32 rate;
} __packed;
+struct legacy_clk_set_value {
+ __le32 rate;
+ __le16 id;
+ __le16 reserved;
+} __packed;
+
struct dvfs_info {
__le32 header;
struct {
@@ -273,19 +397,43 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
return;
}
- list_for_each_entry(t, &ch->rx_pending, node)
- if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
- list_del(&t->node);
- match = t;
- break;
- }
+ /* The command type is not replied by the SCP firmware in legacy mode,
+ * so treat the head of the pending RX list, if it is not empty, as the
+ * command being completed. In TX-only mode, the list is empty.
+ */
+ if (scpi_info->is_legacy) {
+ match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+ node);
+ list_del(&match->node);
+ } else {
+ list_for_each_entry(t, &ch->rx_pending, node)
+ if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+ list_del(&t->node);
+ match = t;
+ break;
+ }
+ }
/* check if wait_for_completion is in progress or timed-out */
if (match && !completion_done(&match->done)) {
- struct scpi_shared_mem *mem = ch->rx_payload;
- unsigned int len = min(match->rx_len, CMD_SIZE(cmd));
+ unsigned int len;
+
+ if (scpi_info->is_legacy) {
+ struct legacy_scpi_shared_mem *mem = ch->rx_payload;
+
+ /* RX Length is not replied by the legacy Firmware */
+ len = match->rx_len;
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ } else {
+ struct scpi_shared_mem *mem = ch->rx_payload;
+
+ len = min(match->rx_len, CMD_SIZE(cmd));
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ }
- match->status = le32_to_cpu(mem->status);
- memcpy_fromio(match->rx_buf, mem->payload, len);
if (match->rx_len > len)
memset(match->rx_buf + len, 0, match->rx_len - len);
complete(&match->done);
@@ -297,7 +445,10 @@ static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = ch->rx_payload;
- u32 cmd = le32_to_cpu(mem->command);
+ u32 cmd = 0;
+
+ if (!scpi_info->is_legacy)
+ cmd = le32_to_cpu(mem->command);
scpi_process_cmd(ch, cmd);
}
@@ -309,8 +460,13 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
- if (t->tx_buf)
- memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ if (t->tx_buf) {
+ if (scpi_info->is_legacy)
+ memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+ else
+ memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ }
+
if (t->rx_buf) {
if (!(++ch->token))
++ch->token;
@@ -319,7 +475,9 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
list_add_tail(&t->node, &ch->rx_pending);
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
- mem->command = cpu_to_le32(t->cmd);
+
+ if (!scpi_info->is_legacy)
+ mem->command = cpu_to_le32(t->cmd);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -344,23 +502,38 @@ static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
mutex_unlock(&ch->xfers_lock);
}
-static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
void *rx_buf, unsigned int rx_len)
{
int ret;
u8 chan;
+ u8 cmd;
struct scpi_xfer *msg;
struct scpi_chan *scpi_chan;
- chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
+ if (scpi_info->commands[idx] < 0)
+ return -EOPNOTSUPP;
+
+ cmd = scpi_info->commands[idx];
+
+ if (scpi_info->is_legacy)
+ chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+ else
+ chan = atomic_inc_return(&scpi_info->next_chan) %
+ scpi_info->num_chans;
scpi_chan = scpi_info->channels + chan;
msg = get_scpi_xfer(scpi_chan);
if (!msg)
return -ENOMEM;
- msg->slot = BIT(SCPI_SLOT);
- msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ if (scpi_info->is_legacy) {
+ msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+ msg->slot = msg->cmd;
+ } else {
+ msg->slot = BIT(SCPI_SLOT);
+ msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ }
msg->tx_buf = tx_buf;
msg->tx_len = tx_len;
msg->rx_buf = rx_buf;
@@ -397,7 +570,7 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
struct clk_get_info clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
if (!ret) {
*min = le32_to_cpu(clk.min_rate);
@@ -412,8 +585,9 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
struct clk_get_value clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
+
return ret ? ret : le32_to_cpu(clk.rate);
}
@@ -425,7 +599,19 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
.rate = cpu_to_le32(rate)
};
- return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct legacy_clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
&stat, sizeof(stat));
}
@@ -434,8 +620,9 @@ static int scpi_dvfs_get_idx(u8 domain)
int ret;
u8 dvfs_idx;
- ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
&dvfs_idx, sizeof(dvfs_idx));
+
return ret ? ret : dvfs_idx;
}
@@ -444,7 +631,7 @@ static int scpi_dvfs_set_idx(u8 domain, u8 index)
int stat;
struct dvfs_set dvfs = {domain, index};
- return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+ return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
&stat, sizeof(stat));
}
@@ -468,9 +655,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (scpi_info->dvfs[domain]) /* data already populated */
return scpi_info->dvfs[domain];
- ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
&buf, sizeof(buf));
-
if (ret)
return ERR_PTR(ret);
@@ -503,7 +689,7 @@ static int scpi_sensor_get_capability(u16 *sensors)
struct sensor_capabilities cap_buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
+ ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
sizeof(cap_buf));
if (!ret)
*sensors = le16_to_cpu(cap_buf.sensors);
@@ -517,7 +703,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
struct _scpi_sensor_info _info;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
&_info, sizeof(_info));
if (!ret) {
memcpy(info, &_info, sizeof(*info));
@@ -533,13 +719,19 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
struct sensor_value buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
- if (!ret)
+ if (ret)
+ return ret;
+
+ if (scpi_info->is_legacy)
+ /* only 32-bits supported, hi_val can be junk */
+ *val = le32_to_cpu(buf.lo_val);
+ else
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
le32_to_cpu(buf.lo_val);
- return ret;
+ return 0;
}
static int scpi_device_get_power_state(u16 dev_id)
@@ -548,7 +740,7 @@ static int scpi_device_get_power_state(u16 dev_id)
u8 pstate;
__le16 id = cpu_to_le16(dev_id);
- ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+ ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
sizeof(id), &pstate, sizeof(pstate));
return ret ? ret : pstate;
}
@@ -561,7 +753,7 @@ static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
.pstate = pstate,
};
- return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
sizeof(dev_set), &stat, sizeof(stat));
}
@@ -591,12 +783,16 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
int ret;
struct scp_capabilities caps;
- ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
+ ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
&caps, sizeof(caps));
if (!ret) {
info->protocol_version = le32_to_cpu(caps.protocol_version);
info->firmware_version = le32_to_cpu(caps.platform_version);
}
+ /* Ignore error if not implemented */
+ if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ return 0;
+
return ret;
}
@@ -681,6 +877,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
return 0;
}
+static const struct of_device_id legacy_scpi_of_match[] = {
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
@@ -693,6 +894,9 @@ static int scpi_probe(struct platform_device *pdev)
if (!scpi_info)
return -ENOMEM;
+ if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+ scpi_info->is_legacy = true;
+
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
@@ -755,8 +959,21 @@ err:
scpi_info->channels = scpi_chan;
scpi_info->num_chans = count;
+ scpi_info->commands = scpi_std_commands;
+
platform_set_drvdata(pdev, scpi_info);
+ if (scpi_info->is_legacy) {
+ /* Replace with legacy variants */
+ scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+ scpi_info->commands = scpi_legacy_commands;
+
+ /* Fill priority bitmap */
+ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+ set_bit(legacy_hpriority_cmds[idx],
+ scpi_info->cmd_priority);
+ }
+
ret = scpi_init_versions(scpi_info);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
@@ -781,6 +998,7 @@ err:
static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi"},
+ {.compatible = "arm,scpi-pre-1.0"},
{},
};
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 88bebe1968b7..54be60ead08f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -560,7 +560,7 @@ static int __init dmi_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
@@ -588,7 +588,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- pr_debug("DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 520a40e5e0e4..6c7d60c239b5 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
- PAGE_SIZE);
+ new_memmap_phy = efi_memmap_alloc(new_nr_map);
if (!new_memmap_phy)
return;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 6621b13c370f..d564d25df8ab 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b98824e3800a..0e2a96b12cb3 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver);
-
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *handle,
unsigned long *new_fdt_addr,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index a6a93116a8f0..921dfa047202 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,13 +16,10 @@
#include "efistub.h"
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+ unsigned long orig_fdt_size,
+ void *fdt, int new_fdt_size, char *cmdline_ptr,
+ u64 initrd_addr, u64 initrd_size)
{
int node, num_rsv;
int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+ fdt_val64 = U64_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
&fdt_val64, sizeof(fdt_val64));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(map_size);
+ fdt_val32 = U32_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_size);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_ver);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
&fdt_val32, sizeof(fdt_val32));
if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
return EFI_LOAD_ERROR;
}
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+ u64 fdt_val64;
+ u32 fdt_val32;
+ int err;
+
+ if (node < 0)
+ return EFI_LOAD_ERROR;
+
+ fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+ &fdt_val64, sizeof(fdt_val64));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->map_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ return EFI_SUCCESS;
+}
+
#ifndef EFI_FDT_ALIGN
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for
- * exit_boot_services().
- */
- status = efi_get_memory_map(sys_table, &map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt(sys_table,
(void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, new_fdt_size,
- cmdline_ptr, initrd_addr, initrd_size,
- memory_map, map_size, desc_size, desc_ver);
+ cmdline_ptr, initrd_addr, initrd_size);
/* Succeeding the first time is the expected case. */
if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
/*
* We need to allocate more space for the new
* device tree, so free existing buffer that is
- * too small. Also free memory map, as we will need
- * to get new one that reflects the free/alloc we do
- * on the device tree buffer.
+ * too small.
*/
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
- sys_table->boottime->free_pool(memory_map);
new_fdt_size += EFI_PAGE_SIZE;
} else {
pr_efi_err(sys_table, "Unable to construct new device tree.\n");
- goto fail_free_mmap;
+ goto fail_free_new_fdt;
}
}
- sys_table->boottime->free_pool(memory_map);
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
+ status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+ if (status != EFI_SUCCESS) {
+ /*
+ * The kernel won't get far without the memory map, but
+ * may still be able to print something meaningful so
+ * return success here.
+ */
+ return EFI_SUCCESS;
+ }
+
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Exit boot services failed.\n");
-fail_free_mmap:
- sys_table->boottime->free_pool(memory_map);
-
fail_free_new_fdt:
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
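The hunks above rely on a two-stage property update: the "linux,uefi-mmap-*" properties are first created with placeholder values while Boot Services are still available (fdt_setprop() may resize the blob), then rewritten after ExitBootServices() with fdt_setprop_inplace(), which never resizes it. A minimal standalone sketch of that pattern using plain libfdt, with an illustrative property pair rather than the stub's own helpers:

/* Sketch only: two-stage FDT property update with libfdt. */
#include <libfdt.h>
#include <stdint.h>

/* Stage 1 (before ExitBootServices): create properties with placeholders. */
static int reserve_props(void *fdt, int chosen)
{
	uint64_t v64 = UINT64_MAX;	/* sized like the final value */
	uint32_t v32 = UINT32_MAX;

	if (fdt_setprop(fdt, chosen, "linux,uefi-mmap-start", &v64, sizeof(v64)))
		return -1;
	return fdt_setprop(fdt, chosen, "linux,uefi-mmap-size", &v32, sizeof(v32));
}

/* Stage 2 (after ExitBootServices): fill them in without resizing the blob. */
static int fill_props(void *fdt, uint64_t start, uint32_t size)
{
	int chosen = fdt_path_offset(fdt, "/chosen");
	uint64_t v64 = cpu_to_fdt64(start);
	uint32_t v32 = cpu_to_fdt32(size);

	if (chosen < 0)
		return -1;
	if (fdt_setprop_inplace(fdt, chosen, "linux,uefi-mmap-start",
				&v64, sizeof(v64)))
		return -1;
	return fdt_setprop_inplace(fdt, chosen, "linux,uefi-mmap-size",
				   &v32, sizeof(v32));
}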
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index f03ddecd232b..78686443cb37 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -9,6 +9,44 @@
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+ return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (!p)
+ return 0;
+
+ return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+ unsigned long size = num_entries * efi.memmap.desc_size;
+
+ if (slab_is_available())
+ return __efi_memmap_alloc_late(size);
+
+ return __efi_memmap_alloc_early(size);
+}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
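For context, a hedged usage sketch of the efi_memmap_alloc() helper added above; the function name, error message and flow here are illustrative, but the zero-on-failure convention and efi.memmap.nr_map follow the code in this patch:

/* Hypothetical caller of the helper above: grow the map by one entry. */
#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int __init example_grow_memmap(void)
{
	phys_addr_t new_phys = efi_memmap_alloc(efi.memmap.nr_map + 1);

	if (!new_phys) {
		pr_warn("could not allocate a larger EFI memory map\n");
		return -ENOMEM;
	}

	/* ... remap/copy descriptors into the new buffer here ... */
	return 0;
}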
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429e21b8..6c60a5087caf 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -630,7 +630,7 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np)
+ if (!np || !of_device_is_available(np))
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
new file mode 100644
index 000000000000..29d58feaf675
--- /dev/null
+++ b/drivers/firmware/psci_checker.c
@@ -0,0 +1,490 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+ COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+ COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+ int migrate_type = -1;
+ int cpu;
+
+ if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+ pr_warn("Missing PSCI operations, aborting tests\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (psci_ops.migrate_info_type)
+ migrate_type = psci_ops.migrate_info_type();
+
+ if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+ migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ /* There is a UP Trusted OS, find on which core it resides. */
+ for_each_online_cpu(cpu)
+ if (psci_tos_resident_on(cpu)) {
+ tos_resident_cpu = cpu;
+ break;
+ }
+ if (tos_resident_cpu == -1)
+ pr_warn("UP Trusted OS resides on no online CPU\n");
+ }
+
+ return 0;
+}
+
+static int find_clusters(const struct cpumask *cpus,
+ const struct cpumask **clusters)
+{
+ unsigned int nb = 0;
+ cpumask_var_t tmp;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(tmp, cpus);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cluster =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ clusters[nb++] = cluster;
+ cpumask_andnot(tmp, tmp, cluster);
+ }
+
+ free_cpumask_var(tmp);
+ return nb;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+ struct cpumask *offlined_cpus)
+{
+ int cpu;
+ int err = 0;
+
+ cpumask_clear(offlined_cpus);
+
+ /* Try to power down all CPUs in the mask. */
+ for_each_cpu(cpu, cpus) {
+ int ret = cpu_down(cpu);
+
+ /*
+ * cpu_down() checks the number of online CPUs before the TOS
+ * resident CPU.
+ */
+ if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+ if (ret != -EBUSY) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down last online CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (cpu == tos_resident_cpu) {
+ if (ret != -EPERM) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down TOS resident CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power down CPU %d\n", ret, cpu);
+ ++err;
+ }
+
+ if (ret == 0)
+ cpumask_set_cpu(cpu, offlined_cpus);
+ }
+
+ /* Try to power up all the CPUs that have been offlined. */
+ for_each_cpu(cpu, offlined_cpus) {
+ int ret = cpu_up(cpu);
+
+ if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power up CPU %d\n", ret, cpu);
+ ++err;
+ } else {
+ cpumask_clear_cpu(cpu, offlined_cpus);
+ }
+ }
+
+ /*
+ * Something went bad at some point and some CPUs could not be turned
+ * back on.
+ */
+ WARN_ON(!cpumask_empty(offlined_cpus) ||
+ num_online_cpus() != nb_available_cpus);
+
+ return err;
+}
+
+static int hotplug_tests(void)
+{
+ int err;
+ cpumask_var_t offlined_cpus;
+ int i, nb_cluster;
+ const struct cpumask **clusters;
+ char *page_buf;
+
+ err = -ENOMEM;
+ if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+ return err;
+ /* We may have up to nb_available_cpus clusters. */
+ clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
+ GFP_KERNEL);
+ if (!clusters)
+ goto out_free_cpus;
+ page_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!page_buf)
+ goto out_free_clusters;
+
+ err = 0;
+ nb_cluster = find_clusters(cpu_online_mask, clusters);
+
+ /*
+	 * Of course the last CPU cannot be powered down, and cpu_down() should
+	 * refuse to do so.
+ */
+ pr_info("Trying to turn off and on again all CPUs\n");
+ err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+ /*
+ * Take down CPUs by cluster this time. When the last CPU is turned
+ * off, the cluster itself should shut down.
+ */
+ for (i = 0; i < nb_cluster; ++i) {
+ int cluster_id =
+ topology_physical_package_id(cpumask_any(clusters[i]));
+ ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+ clusters[i]);
+ /* Remove trailing newline. */
+ page_buf[len - 1] = '\0';
+ pr_info("Trying to turn off and on again cluster %d "
+ "(CPUs %s)\n", cluster_id, page_buf);
+ err += down_and_up_cpus(clusters[i], offlined_cpus);
+ }
+
+ free_page((unsigned long)page_buf);
+out_free_clusters:
+ kfree(clusters);
+out_free_cpus:
+ free_cpumask_var(offlined_cpus);
+ return err;
+}
+
+static void dummy_callback(unsigned long ignored) {}
+
+static int suspend_cpu(int index, bool broadcast)
+{
+ int ret;
+
+ arch_cpu_idle_enter();
+
+ if (broadcast) {
+ /*
+ * The local timer will be shut down, we need to enter tick
+ * broadcast.
+ */
+ ret = tick_broadcast_enter();
+ if (ret) {
+ /*
+			 * In the absence of a hardware broadcast mechanism,
+ * this CPU might be used to broadcast wakeups, which
+ * may be why entering tick broadcast has failed.
+ * There is little the kernel can do to work around
+ * that, so enter WFI instead (idle state 0).
+ */
+ cpu_do_idle();
+ ret = 0;
+ goto out_arch_exit;
+ }
+ }
+
+ /*
+ * Replicate the common ARM cpuidle enter function
+ * (arm_enter_idle_state).
+ */
+ ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+
+ if (broadcast)
+ tick_broadcast_exit();
+
+out_arch_exit:
+ arch_cpu_idle_exit();
+
+ return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+ int cpu = (long)arg;
+ int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+ struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv;
+ /* No need for an actual callback, we just want to wake up the CPU. */
+ struct timer_list wakeup_timer;
+
+ /* Wait for the main thread to give the start signal. */
+ wait_for_completion(&suspend_threads_started);
+
+ /* Set maximum priority to preempt all other threads on this CPU. */
+ if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+
+ dev = this_cpu_read(cpuidle_devices);
+ drv = cpuidle_get_cpu_driver(dev);
+
+ pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+ cpu, drv->state_count - 1);
+
+ setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+ for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+ int index;
+ /*
+ * Test all possible states, except 0 (which is usually WFI and
+ * doesn't use PSCI).
+ */
+ for (index = 1; index < drv->state_count; ++index) {
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+ int ret;
+
+ /*
+ * Set the timer to wake this CPU up in some time (which
+ * should be largely sufficient for entering suspend).
+ * If the local tick is disabled when entering suspend,
+ * suspend_cpu() takes care of switching to a broadcast
+ * tick, so the timer will still wake us up.
+ */
+ mod_timer(&wakeup_timer, jiffies +
+ usecs_to_jiffies(state->target_residency));
+
+ /* IRQs must be disabled during suspend operations. */
+ local_irq_disable();
+
+ ret = suspend_cpu(index, broadcast);
+
+ /*
+ * We have woken up. Re-enable IRQs to handle any
+ * pending interrupt, do not wait until the end of the
+ * loop.
+ */
+ local_irq_enable();
+
+ if (ret == index) {
+ ++nb_suspend;
+ } else if (ret >= 0) {
+ /* We did not enter the expected state. */
+ ++nb_shallow_sleep;
+ } else {
+ pr_err("Failed to suspend CPU %d: error %d "
+ "(requested state %d, cycle %d)\n",
+ cpu, ret, index, i);
+ ++nb_err;
+ }
+ }
+ }
+
+ /*
+	 * Disable the timer to make sure that it cannot fire after this
+	 * point.
+ */
+ del_timer(&wakeup_timer);
+
+ if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+ complete(&suspend_threads_done);
+
+ /* Give up on RT scheduling and wait for termination. */
+ sched_priority.sched_priority = 0;
+ if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+ return nb_err;
+}
+
+static int suspend_tests(void)
+{
+ int i, cpu, err = 0;
+ struct task_struct **threads;
+ int nb_threads = 0;
+
+ threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ /*
+ * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+ * mode, as it might interfere with the suspend threads on other CPUs.
+ * This does not prevent the suspend threads from using cpuidle (only
+ * the idle tasks check this status). Take the idle lock so that
+ * the cpuidle driver and device look-up can be carried out safely.
+ */
+ cpuidle_pause_and_lock();
+
+ for_each_online_cpu(cpu) {
+ struct task_struct *thread;
+ /* Check that cpuidle is available on that CPU. */
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ if (!dev || !drv) {
+ pr_warn("cpuidle not available on CPU %d, ignoring\n",
+ cpu);
+ continue;
+ }
+
+ thread = kthread_create_on_cpu(suspend_test_thread,
+ (void *)(long)cpu, cpu,
+ "psci_suspend_test");
+ if (IS_ERR(thread))
+ pr_err("Failed to create kthread on CPU %d\n", cpu);
+ else
+ threads[nb_threads++] = thread;
+ }
+
+ if (nb_threads < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ atomic_set(&nb_active_threads, nb_threads);
+
+ /*
+ * Wake up the suspend threads. To avoid the main thread being preempted
+ * before all the threads have been unparked, the suspend threads will
+ * wait for the completion of suspend_threads_started.
+ */
+ for (i = 0; i < nb_threads; ++i)
+ wake_up_process(threads[i]);
+ complete_all(&suspend_threads_started);
+
+ wait_for_completion(&suspend_threads_done);
+
+
+ /* Stop and destroy all threads, get return status. */
+ for (i = 0; i < nb_threads; ++i)
+ err += kthread_stop(threads[i]);
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
+ return err;
+}
+
+static int __init psci_checker(void)
+{
+ int ret;
+
+ /*
+	 * Since we're in an initcall, we assume that all the CPUs that can
+	 * be onlined have been onlined.
+ *
+ * The tests assume that hotplug is enabled but nobody else is using it,
+ * otherwise the results will be unpredictable. However, since there
+ * is no userspace yet in initcalls, that should be fine, as long as
+ * no torture test is running at the same time (see Kconfig).
+ */
+ nb_available_cpus = num_online_cpus();
+
+ /* Check PSCI operations are set up and working. */
+ ret = psci_ops_check();
+ if (ret)
+ return ret;
+
+ pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+ pr_info("Starting hotplug tests\n");
+ ret = hotplug_tests();
+ if (ret == 0)
+ pr_info("Hotplug tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in hotplug tests\n", ret);
+ else {
+ pr_err("Out of memory\n");
+ return ret;
+ }
+
+ pr_info("Starting suspend tests (%d cycles per state)\n",
+ NUM_SUSPEND_CYCLE);
+ ret = suspend_tests();
+ if (ret == 0)
+ pr_info("Suspend tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in suspend tests\n", ret);
+ else {
+ switch (ret) {
+ case -ENOMEM:
+ pr_err("Out of memory\n");
+ break;
+ case -ENODEV:
+ pr_warn("Could not start suspend tests on any CPU\n");
+ break;
+ }
+ }
+
+ pr_info("PSCI checker completed\n");
+ return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
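The wait loop at the end of suspend_test_thread() uses the standard "sleep until kthread_stop()" idiom; a stripped-down sketch of just that idiom, with a hypothetical thread function that is not part of the checker:

/* Sketch: sleep until kthread_stop() without losing the wakeup. */
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_wait_for_stop(void *unused)
{
	for (;;) {
		/*
		 * Publish the sleeping state before testing the stop flag,
		 * so a kthread_stop() issued in between still wakes us.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}

	return 0;
}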
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d95c70227c05..893f953eaccf 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -28,6 +28,10 @@
#include "qcom_scm.h"
+#define SCM_HAS_CORE_CLK BIT(0)
+#define SCM_HAS_IFACE_CLK BIT(1)
+#define SCM_HAS_BUS_CLK BIT(2)
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
@@ -323,32 +327,40 @@ EXPORT_SYMBOL(qcom_scm_is_available);
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
+ unsigned long clks;
int ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
return -ENOMEM;
- scm->core_clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(scm->core_clk)) {
- if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (clks & SCM_HAS_CORE_CLK) {
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire core clk\n");
return PTR_ERR(scm->core_clk);
-
- scm->core_clk = NULL;
+ }
}
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+ if (clks & SCM_HAS_IFACE_CLK) {
scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(scm->iface_clk)) {
if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire iface clk\n");
return PTR_ERR(scm->iface_clk);
}
+ }
+ if (clks & SCM_HAS_BUS_CLK) {
scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(scm->bus_clk)) {
if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire bus clk\n");
return PTR_ERR(scm->bus_clk);
}
}
@@ -356,7 +368,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
- reset_controller_register(&scm->reset);
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX);
@@ -372,10 +386,23 @@ static int qcom_scm_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_scm_dt_match[] = {
- { .compatible = "qcom,scm-apq8064",},
- { .compatible = "qcom,scm-msm8660",},
- { .compatible = "qcom,scm-msm8960",},
- { .compatible = "qcom,scm",},
+ { .compatible = "qcom,scm-apq8064",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8660",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8960",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8996",
+ .data = NULL, /* no clocks */
+ },
+ { .compatible = "qcom,scm",
+ .data = (void *)(SCM_HAS_CORE_CLK
+ | SCM_HAS_IFACE_CLK
+ | SCM_HAS_BUS_CLK),
+ },
{}
};
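The probe path above reads the clock requirements back with of_device_get_match_data(); a small sketch of the same match-data flag pattern, with hypothetical compatible strings and flag names:

/* Sketch of per-compatible feature flags carried in of_device_id.data. */
#include <linux/bitops.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define EX_HAS_FOO_CLK	BIT(0)
#define EX_HAS_BAR_CLK	BIT(1)

static int example_probe(struct platform_device *pdev)
{
	unsigned long flags =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	if (flags & EX_HAS_FOO_CLK) {
		/* ... only then request the "foo" clock ... */
	}

	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-a", .data = (void *)EX_HAS_FOO_CLK },
	{ .compatible = "vendor,example-b",
	  .data = (void *)(EX_HAS_FOO_CLK | EX_HAS_BAR_CLK) },
	{ }
};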
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 000000000000..ff2730d5c468
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+	  data and the different communication channels in SysRAM or RAM and
+	  keeps the content in synchronization between the host CPU and remote
+ processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ help
+	  BPMP (Boot and Power Management Processor) is designed to off-load
+	  PM functions such as clock/DVFS/thermal/power management from the CPU.
+	  It needs HSP as the HW synchronization and notification module and the
+	  IVC module as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 000000000000..e34a2f79e1ad
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 000000000000..4ff02d310868
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK BIT(0)
+#define MSG_RING BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
+{
+ return channel - channel->bpmp->channels;
+}
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int offset, count;
+ int index;
+
+ offset = bpmp->soc->channels.thread.offset;
+ count = bpmp->soc->channels.thread.count;
+
+ index = tegra_bpmp_channel_get_index(channel);
+ if (index < 0)
+ return index;
+
+ if (index < offset || index >= offset + count)
+ return -EINVAL;
+
+ return index - offset;
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ unsigned int offset = bpmp->soc->channels.thread.offset;
+ unsigned int count = bpmp->soc->channels.thread.count;
+
+ if (index >= count)
+ return NULL;
+
+ return &bpmp->channels[offset + index];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
+
+ return &bpmp->channels[offset + smp_processor_id()];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
+
+ return &bpmp->channels[offset];
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_master_acked(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_master_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ if (data && size > 0)
+ memcpy(data, channel->ib->data, size);
+
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ channel->ob->code = mrq;
+ channel->ob->flags = flags;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, index);
+ if (!channel) {
+ channel = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ if (!tegra_bpmp_master_free(channel)) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0) {
+ clear_bit(index, bpmp->threaded.allocated);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.busy);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_master_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_channel_get_tx(bpmp);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0)
+ return err;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ err = tegra_bpmp_wait_ack(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
+static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
+ int code, const void *data, size_t size)
+{
+ unsigned long flags = channel->ib->flags;
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ struct tegra_bpmp_mb_data *frame;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (WARN_ON(IS_ERR(frame)))
+ return;
+
+ frame->code = code;
+
+ if (data && size > 0)
+ memcpy(frame->data, data, size);
+
+ err = tegra_ivc_write_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (WARN_ON(err < 0))
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+ }
+}
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request *request;
+ struct mrq_ping_response response;
+
+ request = (struct mrq_ping_request *)channel->ib->data;
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request->challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ strlcpy(tag, virt, size);
+
+ dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+ return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = channel->ob->flags;
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = tegra_bpmp_channel_get_rx(bpmp);
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_master_acked(channel))
+ tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+ spin_lock(&bpmp->lock);
+
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, i);
+ if (!channel)
+ continue;
+
+ if (tegra_bpmp_master_acked(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ int err;
+
+ if (WARN_ON(bpmp->mbox.channel == NULL))
+ return;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+ bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+ 1, message_size, tegra_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp_channel *channel;
+ struct tegra_bpmp *bpmp;
+ unsigned int i;
+ char tag[32];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+ if (!bpmp->tx.pool) {
+ dev_err(&pdev->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+ if (!bpmp->tx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+ if (!bpmp->rx.pool) {
+ dev_err(&pdev->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+	if (!bpmp->rx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
+ bpmp->soc->channels.thread.count +
+ bpmp->soc->channels.cpu_rx.count;
+
+ bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
+ sizeof(*channel), GFP_KERNEL);
+ if (!bpmp->channels) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ /* message channel initialization */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ err = tegra_bpmp_channel_init(channel, bpmp, i);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ bpmp->mbox.client.dev = &pdev->dev;
+ bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+ bpmp->mbox.client.tx_block = false;
+ bpmp->mbox.client.knows_txdone = false;
+
+ bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+ if (IS_ERR(bpmp->mbox.channel)) {
+ err = PTR_ERR(bpmp->mbox.channel);
+ dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ /* reset message channels */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ tegra_bpmp_channel_reset(channel);
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto free_mbox;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ platform_set_drvdata(pdev, bpmp);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+ mbox_free_channel(bpmp->mbox.channel);
+cleanup_channels:
+ while (i--)
+ tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
+free_rx:
+ gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+ gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+ return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 6,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 6,
+ .count = 7,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ },
+ .probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+ return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
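A hedged sketch of how a client driver would consume the interface added above, using the MRQ_PING request/response types that bpmp.c itself exercises; the calling device is assumed to carry an "nvidia,bpmp" phandle, and the function name is illustrative:

/* Sketch of a BPMP client; not part of this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int example_bpmp_ping(struct device *dev)
{
	struct mrq_ping_request request = { .challenge = 7 };
	struct mrq_ping_response response = { 0 };
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_PING,
		.tx = { .data = &request, .size = sizeof(request) },
		.rx = { .data = &response, .size = sizeof(response) },
	};
	struct tegra_bpmp *bpmp;
	int err;

	bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	/* Sleeping variant: only valid with interrupts enabled. */
	err = tegra_bpmp_transfer(bpmp, &msg);

	tegra_bpmp_put(bpmp);
	return err;
}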
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 000000000000..29ecfd815320
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+	 * established state, indicating that it has cleared the counters in our
+ * rx_channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+	 * state, it can clear its counters and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts; the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with ACCESS_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
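A standalone illustration of the wrapping u32 counter arithmetic that tegra_ivc_empty(), tegra_ivc_full() and tegra_ivc_available() rely on; plain C, nothing kernel-specific:

/* Occupancy of the ring is always tx - rx, even across the 2^32 wrap. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_frames = 4;
	uint32_t tx = 0xfffffffe;	/* transmitter about to wrap */
	uint32_t rx = 0xfffffffc;

	assert(tx - rx == 2);		/* two frames pending */

	tx += 3;			/* wraps past zero */
	assert(tx - rx == 5);		/* still correct after the wrap */
	assert(tx - rx > num_frames);	/* over-full: treated as invalid */

	return 0;
}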
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->tx.channel->tx.count) =
+ ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->rx.channel->rx.count) =
+ ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, ivc->rx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, ivc->tx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, ivc->tx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header,
+ unsigned int frame)
+{
+ if (WARN_ON(frame >= ivc->num_frames))
+ return ERR_PTR(-EINVAL);
+
+ return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return ERR_PTR(-EINVAL);
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+
+ if (state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+ state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+		 * Order observation of the peer's TEGRA_IVC_STATE_ACK before
+		 * stores clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
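+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a client
+ * typically resets the channel once and then calls tegra_ivc_notified()
+ * from its notification handler until the channel reports itself
+ * established:
+ *
+ *     tegra_ivc_reset(ivc);
+ *
+ *     ... later, in the client's notification/IRQ handler ...
+ *     if (tegra_ivc_notified(ivc) == 0)
+ *             ... channel is ESTABLISHED, frame I/O may proceed ...
+ *
+ * A return value of -EAGAIN simply means the handshake described in the
+ * state transition table above is still in progress.
+ */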
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
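+
+/*
+ * Sizing sketch (illustrative only): for a channel carrying num_frames
+ * frames of frame_size bytes, each of the RX and TX regions needs
+ *
+ *     tegra_ivc_total_queue_size(num_frames * tegra_ivc_align(frame_size))
+ *
+ * bytes, i.e. the aligned payload area plus one struct tegra_ivc_header.
+ */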
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->rx.phys == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->tx.phys == DMA_ERROR_CODE) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ ivc->rx.channel = rx;
+ ivc->tx.channel = tx;
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
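+
+/*
+ * Minimal setup sketch (illustrative only; "rx_base", "tx_base", their DMA
+ * addresses and "notify_fn" are hypothetical caller-provided names):
+ *
+ *     size_t frame = tegra_ivc_align(64);
+ *
+ *     err = tegra_ivc_init(ivc, dev, rx_base, rx_phys, tx_base, tx_phys,
+ *                          16, frame, notify_fn, NULL);
+ *     if (!err)
+ *             tegra_ivc_reset(ivc);
+ *
+ * tegra_ivc_cleanup() then undoes the DMA mappings created above whenever a
+ * struct device was supplied as the peer.
+ */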
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 000000000000..874ff32db366
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,1991 @@
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message: Transmit message
+ * @rx_len: Receive message length
+ * @xfer_buf: Preallocated buffer to store receive message
+ * Since we work with a request-ACK protocol, we can
+ * reuse the same buffer for the rx path as we
+ * use for the tx path.
+ * @done: completion event
+ */
+struct ti_sci_xfer {
+ struct ti_msgmgr_message tx_message;
+ u8 rx_len;
+ u8 *xfer_buf;
+ struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count: Counting semaphore for managing the maximum number of
+ * simultaneous messages.
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+ struct semaphore sem_xfer_count;
+ struct ti_sci_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ /* protect transfer allocation */
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @host_id: Host identifier representing the compute entity
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+ u8 host_id;
+ int max_rx_timeout_ms;
+ int max_msgs;
+ int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @nb: Reboot Notifier block
+ * @d: Debugfs file entry
+ * @debug_region: Memory region where the debug messages are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle: Instance of TI SCI handle to send to clients.
+ * @cl: Mailbox Client
+ * @chan_tx: Transmit mailbox channel
+ * @chan_rx: Receive mailbox channel
+ * @minfo: Message info
+ * @node: list head
+ * @users: Number of users of this instance
+ */
+struct ti_sci_info {
+ struct device *dev;
+ struct notifier_block nb;
+ const struct ti_sci_desc *desc;
+ struct dentry *d;
+ void __iomem *debug_region;
+ char *debug_buffer;
+ size_t debug_region_size;
+ struct ti_sci_handle handle;
+ struct mbox_client cl;
+ struct mbox_chan *chan_tx;
+ struct mbox_chan *chan_rx;
+ struct ti_sci_xfers_info minfo;
+ struct list_head node;
+ /* protected by ti_sci_list_mutex */
+ int users;
+
+};
+
+#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s: sequence file pointer
+ * @unused: unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+ struct ti_sci_info *info = s->private;
+
+ memcpy_fromio(info->debug_buffer, info->debug_region,
+ info->debug_region_size);
+ /*
+ * We don't trust firmware to NUL-terminate the last byte (hence we
+ * have allocated 1 extra 0 byte). Since we cannot guarantee any
+ * specific data format for debug messages, we just present the data
+ * in the buffer as is - we expect the messages to be self-explanatory.
+ */
+ seq_puts(s, info->debug_buffer);
+ return 0;
+}
+
+/**
+ * ti_sci_debug_open() - debug file open
+ * @inode: inode pointer
+ * @file: file pointer
+ *
+ * Return: result of single_open
+ */
+static int ti_sci_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ti_sci_debug_show, inode->i_private);
+}
+
+/* log file operations */
+static const struct file_operations ti_sci_debug_fops = {
+ .open = ti_sci_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ char debug_name[50] = "ti_sci_debug@";
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "debug_messages");
+ info->debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->debug_region))
+ return 0;
+ info->debug_region_size = resource_size(res);
+
+ info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+ sizeof(char), GFP_KERNEL);
+ if (!info->debug_buffer)
+ return -ENOMEM;
+ /* Set up NUL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+ info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+ sizeof(debug_name)),
+ 0444, NULL, info, &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+ dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+ info->debug_region, info->debug_region_size, res);
+ return 0;
+}
+
+/**
+ * ti_sci_debugfs_destroy() - clean up log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ */
+static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ if (IS_ERR(info->debug_region))
+ return;
+
+ debugfs_remove(info->d);
+}
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+ return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev: Device pointer corresponding to the SCI entity
+ * @hdr: pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+ struct ti_sci_msg_hdr *hdr)
+{
+ dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+ hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+ struct device *dev = info->dev;
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_msgmgr_message *mbox_msg = m;
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+ struct ti_sci_xfer *xfer;
+ u8 xfer_id;
+
+ xfer_id = hdr->seq;
+
+ /*
+ * Are we even expecting this?
+ * NOTE: barriers were implicit in locks used for modifying the bitmap
+ */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ /* Is the message of valid length? */
+ if (mbox_msg->len > info->desc->max_msg_size) {
+ dev_err(dev, "Unable to handle %d xfer(max %d)\n",
+ mbox_msg->len, info->desc->max_msg_size);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+ if (mbox_msg->len < xfer->rx_len) {
+ dev_err(dev, "Recv xfer %d < expected %d length\n",
+ mbox_msg->len, xfer->rx_len);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+
+ ti_sci_dump_header_dbg(dev, hdr);
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+ complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info: Pointer to SCI entity information
+ * @msg_type: Message type
+ * @msg_flags: Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid struct ti_sci_xfer pointer if all went fine,
+ * else corresponding error pointer.
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+ u16 msg_type, u32 msg_flags,
+ size_t tx_message_size,
+ size_t rx_message_size)
+{
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_hdr *hdr;
+ unsigned long flags;
+ unsigned long bit_pos;
+ u8 xfer_id;
+ int ret;
+ int timeout;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_message_size > info->desc->max_msg_size ||
+ tx_message_size > info->desc->max_msg_size ||
+ rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+ return ERR_PTR(-ERANGE);
+
+ /*
+ * Ensure we have only a controlled number of pending messages.
+ * Ideally, we would only have to wait for a single message; be
+ * conservative and wait for five times that.
+ */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+ ret = down_timeout(&minfo->sem_xfer_count, timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msgs);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /*
+ * We already ensured in probe that max_msgs fits in hdr.seq.
+ * NOTE: this gives us predictable O(1) access to a transfer, BUT it
+ * exposes us to risk if the remote misbehaves and sends corrupted
+ * message sequence numbers in its responses. If that happens, we are
+ * in trouble anyway.
+ */
+ xfer_id = (u8)bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer->tx_message.len = tx_message_size;
+ xfer->rx_len = (u8)rx_message_size;
+
+ reinit_completion(&xfer->done);
+
+ hdr->seq = xfer_id;
+ hdr->type = msg_type;
+ hdr->host = info->desc->host_id;
+ hdr->flags = msg_flags;
+
+ return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo: transfer info pointer
+ * @xfer: message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+ struct ti_sci_xfer *xfer)
+{
+ unsigned long flags;
+ struct ti_sci_msg_hdr *hdr;
+ u8 xfer_id;
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer_id = hdr->seq;
+
+ /*
+ * Keep the locked section as small as possible.
+ * NOTE: we might get away with an smp_mb() and no lock here,
+ * but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer_id, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /* Increment the count for the next user to get through */
+ up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info: Pointer to SCI entity information
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ struct ti_sci_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct device *dev = info->dev;
+
+ ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(info->chan_tx, ret);
+
+ return ret;
+}
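+
+/*
+ * All ti_sci_cmd_*() helpers below follow the pattern sketched here
+ * (illustrative; "ti_sci_msg_req_xyz" stands in for the concrete request
+ * type of the command):
+ *
+ *     xfer = ti_sci_get_one_xfer(info, msg_type, flags,
+ *                                sizeof(*req), sizeof(*resp));
+ *     req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
+ *     ... fill in the request fields ...
+ *     ret = ti_sci_do_xfer(info, xfer);
+ *     ... on success, inspect the response left in xfer->xfer_buf ...
+ *     ti_sci_put_one_xfer(&info->minfo, xfer);
+ */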
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info: Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+ struct device *dev = info->dev;
+ struct ti_sci_handle *handle = &info->handle;
+ struct ti_sci_version_info *ver = &handle->version;
+ struct ti_sci_msg_resp_version *rev_info;
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+ /* No need to set up flags since it is expected to respond */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+ 0x0, sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ ver->abi_major = rev_info->abi_major;
+ ver->abi_minor = rev_info->abi_minor;
+ ver->firmware_revision = rev_info->firmware_revision;
+ strncpy(ver->firmware_description, rev_info->firmware_description,
+ sizeof(ver->firmware_description));
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r: pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+ struct ti_sci_msg_hdr *hdr = r;
+
+ return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @flags: flags to setup for the device
+ * @state: State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle: Handle to the device
+ * @id: Device Identifier
+ * @clcnt: Pointer to Context Loss Count
+ * @resets: pointer to resets
+ * @p_state: pointer to p_state
+ * @c_state: pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 *clcnt, u32 *resets,
+ u8 *p_state, u8 *c_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_device_state *req;
+ struct ti_sci_msg_resp_get_device_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!clcnt && !resets && !p_state && !c_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ /* Response is expected, so no need to set any flags */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+ 0, sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+ req->id = id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (clcnt)
+ *clcnt = resp->context_loss_count;
+ if (resets)
+ *resets = resp->resets;
+ if (p_state)
+ *p_state = resp->programmed_state;
+ if (c_state)
+ *c_state = resp->current_state;
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * NOTE: The request is for exclusive access for the processor.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+ u8 unused;
+
+ /* check the device state which will also tell us if the ID is valid */
+ return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @count: Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+ u32 *count)
+{
+ return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state)
+{
+ int ret;
+ u8 state;
+
+ if (!r_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+ if (ret)
+ return ret;
+
+ *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be stopped
+ * @curr_state: true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be ON
+ * @curr_state: true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @curr_state: true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+ bool *curr_state)
+{
+ int ret;
+ u8 state;
+
+ if (!curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+ if (ret)
+ return ret;
+
+ *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 reset_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_resets *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+ req->id = id;
+ req->resets = reset_state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @reset_state: Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 *reset_state)
+{
+ return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+ NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @flags: Header flags as needed
+ * @state: State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->request_state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @programmed_state: State requested for clock to move to
+ * @current_state: State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *programmed_state, u8 *current_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_state *req;
+ struct ti_sci_msg_resp_get_clock_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!programmed_state && !current_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (programmed_state)
+ *programmed_state = resp->programmed_state;
+ if (current_state)
+ *current_state = resp->current_state;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool needs_ssc, bool can_change_freq,
+ bool enable_input_term)
+{
+ u32 flags = 0;
+
+ flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+ flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+ flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+ MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, bool *req_state)
+{
+ u8 state = 0;
+ int ret;
+
+ if (!req_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+ if (ret)
+ return ret;
+
+ *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_parent *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->parent_id = parent_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 *parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_parent *req;
+ struct ti_sci_msg_resp_get_clock_parent *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !parent_id)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *parent_id = resp->parent_id;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *num_parents)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_num_parents *req;
+ struct ti_sci_msg_resp_get_clock_num_parents *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !num_parents)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *num_parents = resp->num_parents;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency as close to
+ * this target frequency as possible will be matched.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @match_freq: Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq,
+ u64 *match_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_query_clock_freq *req;
+ struct ti_sci_msg_resp_query_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !match_freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *match_freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. The clock will be
+ * programmed as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_freq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @freq: Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 *freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_freq *req;
+ struct ti_sci_msg_resp_get_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
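+/**
+ * ti_sci_cmd_core_reboot() - Command to request a SoC system reset
+ * @handle: pointer to TI SCI handle
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */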
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_reboot *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ ret = 0;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/*
+ * ti_sci_setup_ops() - Set up the operations structures
+ * @info: pointer to TISCI instance information
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+ struct ti_sci_ops *ops = &info->handle.ops;
+ struct ti_sci_core_ops *core_ops = &ops->core_ops;
+ struct ti_sci_dev_ops *dops = &ops->dev_ops;
+ struct ti_sci_clk_ops *cops = &ops->clk_ops;
+
+ core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+ dops->get_device = ti_sci_cmd_get_device;
+ dops->idle_device = ti_sci_cmd_idle_device;
+ dops->put_device = ti_sci_cmd_put_device;
+
+ dops->is_valid = ti_sci_cmd_dev_is_valid;
+ dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+ dops->is_idle = ti_sci_cmd_dev_is_idle;
+ dops->is_stop = ti_sci_cmd_dev_is_stop;
+ dops->is_on = ti_sci_cmd_dev_is_on;
+ dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+ dops->set_device_resets = ti_sci_cmd_set_device_resets;
+ dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+ cops->get_clock = ti_sci_cmd_get_clock;
+ cops->idle_clock = ti_sci_cmd_idle_clock;
+ cops->put_clock = ti_sci_cmd_put_clock;
+ cops->is_auto = ti_sci_cmd_clk_is_auto;
+ cops->is_on = ti_sci_cmd_clk_is_on;
+ cops->is_off = ti_sci_cmd_clk_is_off;
+
+ cops->set_parent = ti_sci_cmd_clk_set_parent;
+ cops->get_parent = ti_sci_cmd_clk_get_parent;
+ cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+ cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+ cops->set_freq = ti_sci_cmd_clk_set_freq;
+ cops->get_freq = ti_sci_cmd_clk_get_freq;
+}
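+
+/*
+ * Once a handle is obtained (see ti_sci_get_handle() below), clients reach
+ * these operations through it; for example (sketch only, "dev_id"/"clk_id"
+ * are SoC-specific identifiers supplied by the caller):
+ *
+ *     u64 freq;
+ *
+ *     handle->ops.dev_ops.get_device(handle, dev_id);
+ *     handle->ops.clk_ops.get_freq(handle, dev_id, clk_id, &freq);
+ */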
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev: Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * such tracking is expected to be maintained by the caller of the TI SCI
+ * protocol library. ti_sci_put_handle must be balanced with a successful
+ * ti_sci_get_handle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ struct device_node *ti_sci_np;
+ struct list_head *p;
+ struct ti_sci_handle *handle = NULL;
+ struct ti_sci_info *info;
+
+ if (!dev) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+ ti_sci_np = of_get_parent(dev->of_node);
+ if (!ti_sci_np) {
+ dev_err(dev, "No OF information\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle: Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * such tracking is expected to be maintained by the caller of the TI SCI
+ * protocol library. ti_sci_put_handle must be balanced with a successful
+ * ti_sci_get_handle.
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, the error value is returned back;
+ * if NULL was passed, -EINVAL is returned.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ mutex_lock(&ti_sci_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+ const struct ti_sci_handle **ptr = res;
+ const struct ti_sci_handle *handle = *ptr;
+ int ret;
+
+ ret = ti_sci_put_handle(handle);
+ if (ret)
+ dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev: device for which we want SCI handle for.
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed; it MUST NOT be released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework;
+ * such tracking is expected to be maintained by the caller of the
+ * TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ const struct ti_sci_handle **ptr;
+ const struct ti_sci_handle *handle;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_handle(dev);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
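+
+/*
+ * Typical use from a consumer driver's probe() (sketch only):
+ *
+ *     const struct ti_sci_handle *handle;
+ *
+ *     handle = devm_ti_sci_get_handle(dev);
+ *     if (IS_ERR(handle))
+ *             return PTR_ERR(handle);
+ *
+ * The handle is then released automatically along with the consumer's other
+ * device-managed resources.
+ */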
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ const struct ti_sci_handle *handle = &info->handle;
+
+ ti_sci_cmd_core_reboot(handle);
+
+ /* Whether the call fails or passes, we should not get here at all */
+ return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ .host_id = 2,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 1000,
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 64,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+ {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct ti_sci_desc *desc;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info = NULL;
+ struct ti_sci_xfers_info *minfo;
+ struct mbox_client *cl;
+ int ret = -EINVAL;
+ int i;
+ int reboot = 0;
+
+ of_id = of_match_device(ti_sci_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ reboot = of_property_read_bool(dev->of_node,
+ "ti,system-reboot-controller");
+ INIT_LIST_HEAD(&info->node);
+ minfo = &info->minfo;
+
+ /*
+ * Pre-allocate messages.
+ * NEVER allocate more than what we can indicate in hdr.seq;
+ * if the data description has a bug, force a fix.
+ */
+ if (WARN_ON(desc->max_msgs >=
+ 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+ return -EINVAL;
+
+ minfo->xfer_block = devm_kcalloc(dev,
+ desc->max_msgs,
+ sizeof(*minfo->xfer_block),
+ GFP_KERNEL);
+ if (!minfo->xfer_block)
+ return -ENOMEM;
+
+ minfo->xfer_alloc_table = devm_kzalloc(dev,
+ BITS_TO_LONGS(desc->max_msgs)
+ * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!minfo->xfer_alloc_table)
+ return -ENOMEM;
+ bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+ xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->xfer_buf)
+ return -ENOMEM;
+
+ xfer->tx_message.buf = xfer->xfer_buf;
+ init_completion(&xfer->done);
+ }
+
+ ret = ti_sci_debugfs_create(pdev, info);
+ if (ret)
+ dev_warn(dev, "Failed to create debug file\n");
+
+ platform_set_drvdata(pdev, info);
+
+ cl = &info->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->rx_callback = ti_sci_rx_callback;
+ cl->knows_txdone = true;
+
+ spin_lock_init(&minfo->xfer_lock);
+ sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+ info->chan_rx = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(info->chan_rx)) {
+ ret = PTR_ERR(info->chan_rx);
+ goto out;
+ }
+
+ info->chan_tx = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(info->chan_tx)) {
+ ret = PTR_ERR(info->chan_tx);
+ goto out;
+ }
+ ret = ti_sci_cmd_get_revision(info);
+ if (ret) {
+ dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+ goto out;
+ }
+
+ ti_sci_setup_ops(info);
+
+ if (reboot) {
+ info->nb.notifier_call = tisci_reboot_handler;
+ info->nb.priority = 128;
+
+ ret = register_restart_handler(&info->nb);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ return ret;
+ }
+ }
+
+ dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+ info->handle.version.abi_major, info->handle.version.abi_minor,
+ info->handle.version.firmware_revision,
+ info->handle.version.firmware_description);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_add_tail(&info->node, &ti_sci_list);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+ if (!IS_ERR(info->chan_tx))
+ mbox_free_channel(info->chan_tx);
+ if (!IS_ERR(info->chan_rx))
+ mbox_free_channel(info->chan_rx);
+ debugfs_remove(info->d);
+ return ret;
+}
+
+static int ti_sci_remove(struct platform_device *pdev)
+{
+ struct ti_sci_info *info;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ of_platform_depopulate(dev);
+
+ info = platform_get_drvdata(pdev);
+
+ if (info->nb.notifier_call)
+ unregister_restart_handler(&info->nb);
+
+ mutex_lock(&ti_sci_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ if (!ret) {
+ ti_sci_debugfs_destroy(pdev, info);
+
+ /* Safe to free channels since no more users */
+ mbox_free_channel(info->chan_tx);
+ mbox_free_channel(info->chan_rx);
+ }
+
+ return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+ .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
+ },
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 000000000000..9b611e9e6f6d
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,492 @@
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT 0x0000
+#define TI_SCI_MSG_WAKE_RESET 0x0001
+#define TI_SCI_MSG_VERSION 0x0002
+#define TI_SCI_MSG_WAKE_REASON 0x0003
+#define TI_SCI_MSG_GOODBYE 0x0004
+#define TI_SCI_MSG_SYS_RESET 0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+
+/**
+ * struct ti_sci_msg_hdr - Generic message header for all messages and responses
+ * @type: Type of messages: One of TI_SCI_MSG* values
+ * @host: Host of the message
+ * @seq: Message identifier indicating a transfer sequence
+ * @flags: Flag for the message
+ */
+struct ti_sci_msg_hdr {
+ u16 type;
+ u8 host;
+ u8 seq;
+#define TI_SCI_MSG_FLAG(val) (1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1)
+ /* Additional Flags */
+ u32 flags;
+} __packed;
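[Editorial sketch, not part of the patch: how the header fields and flag macros above combine. The helper name is made up; real requests are built through the xfer machinery in ti_sci.c, and u16/u8 come from <linux/types.h>.]

/* Illustrative only: populate the generic header for a request that
 * expects an ACK/NACK once the firmware has processed it. */
static void example_fill_hdr(struct ti_sci_msg_hdr *hdr, u16 type, u8 host,
			     u8 seq)
{
	hdr->type = type;		/* e.g. TI_SCI_MSG_VERSION */
	hdr->host = host;		/* host_id from the SoC description */
	hdr->seq = seq;			/* token matching request and response */
	hdr->flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
}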
+
+/**
+ * struct ti_sci_msg_resp_version - Response for a version message
+ * @hdr: Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision: Firmware revision
+ * @abi_major: Major version of the ABI that firmware supports
+ * @abi_minor: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+ struct ti_sci_msg_hdr hdr;
+ char firmware_description[32];
+ u16 firmware_revision;
+ u8 abi_major;
+ u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr: Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+ struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF 0
+#define MSG_DEVICE_SW_STATE_RETENTION 1
+#define MSG_DEVICE_SW_STATE_ON 2
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr: Generic header
+ * @id: Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device state
+ * information.
+ */
+struct ti_sci_msg_req_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr: Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ * driver can use this monotonic counter to determine if the device has
+ * lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state: The state as programmed by set_device.
+ * - Uses the MSG_DEVICE_SW_* macros
+ * @current_state: The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 context_loss_count;
+ u32 resets;
+ u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF 0
+#define MSG_DEVICE_HW_STATE_ON 1
+#define MSG_DEVICE_HW_STATE_TRANS 2
+ u8 current_state;
+} __packed;
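[Editorial sketch, not part of the patch: a hypothetical consumer of the response above, using the monotonic context_loss_count to decide whether hardware context must be restored. The cached value and its storage are illustrative.]

/* Illustrative: compare against a cached value kept by the caller */
static bool example_context_lost(const struct ti_sci_msg_resp_get_device_state *resp,
				 u32 *last_count)
{
	bool lost = (resp->context_loss_count != *last_count);

	*last_count = resp->context_loss_count;
	return lost;
}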
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ * configuration of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ * and usage of the reset flags are device specific. 0 for a bit
+ * indicates releasing the reset represented by that bit while 1
+ * indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr: Generic Header, Certain flags can be set specific to the clocks:
+ * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ * via spread spectrum clocking.
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ * frequency to be changed while it is running so long as it
+ * is within the min/max limits.
+ * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ * is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @request_state: Request the state for the clock to be set to.
+ * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ * it can be disabled, regardless of the state of the device
+ * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ * automatically manage the state of this clock. If the device
+ * is enabled, then the clock is enabled. If the device is set
+ * to off or retention, then the clock is internally set as not
+ * being required by the device (default).
+ * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled,
+ * regardless of the state of the device.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto-managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in other states, the TISCI entity assumes the
+ * remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ 0
+#define MSG_CLOCK_SW_STATE_AUTO 1
+#define MSG_CLOCK_SW_STATE_REQ 2
+ u8 request_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get state of.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr: Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ * MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ * MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY 0
+#define MSG_CLOCK_HW_STATE_READY 1
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: The new clock parent is selectable by an index via this
+ * parameter.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get the parent for.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr: Generic Header
+ * @parent_id: The current clock parent
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr: Generic header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr: Generic header
+ * @num_parents: Number of clock parents
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u8 num_parents;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ * as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. For specific requests, TISCI evaluates its capability to
+ * achieve the requested frequency within the provided range and responds
+ * with a result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ * at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. For specific requests, TISCI evaluates its capability to
+ * achieve the requested range and responds with a success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the
+ * TISCI entity. In some cases, clock frequencies are configured by the host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with the frequency
+ * that the clock is currently running at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that the clock is currently running at, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+#endif /* __TI_SCI_H */
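[Editorial sketch, not part of the patch: to make the message layouts above concrete, a hypothetical caller filling a TI_SCI_MSG_SET_CLOCK_FREQ request. Buffer handling is simplified (the real driver uses the pre-allocated xfer buffers in ti_sci.c), and the 10% frequency window is just an example policy.]

/* Illustrative sketch: build a set-clock-frequency request in a caller
 * supplied buffer of at least sizeof(struct ti_sci_msg_req_set_clock_freq). */
static void example_build_set_freq(void *buf, u32 dev_id, u8 clk_id,
				   u64 target_hz)
{
	struct ti_sci_msg_req_set_clock_freq *req = buf;

	req->hdr.type = TI_SCI_MSG_SET_CLOCK_FREQ;
	req->hdr.flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	/* Give the firmware a window around the target; it picks the
	 * closest achievable rate within [min, max]. */
	req->min_freq_hz = target_hz - target_hz / 10;
	req->target_freq_hz = target_hz;
	req->max_freq_hz = target_hz + target_hz / 10;
}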
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 1e8fde8cb803..2292742eac8f 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
return 0;
}
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d779307a9685..46e6dcc089cb 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/mfd/tps65218.h>
struct tps65218_gpio {
@@ -30,7 +31,7 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
unsigned int val;
int ret;
- ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+ ret = regmap_read(tps65218->regmap, TPS65218_REG_ENABLE2, &val);
if (ret)
return ret;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f4c26c7826cd..86bf3b84ada5 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
+ gpiochip_free_hogs(chip);
/* Numb the device, cancelling all outstanding operations */
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
- gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
/*
* We accept no more calls into the driver from this point, so
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 4f973a9c7b87..8ec1967a850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -305,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 9ada56c16a58..4c851fde1e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
+ case CHIP_POLARIS12:
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8aafd9cb0d..fe3bb94fe58d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
"STONEY",
"POLARIS10",
"POLARIS11",
+ "POLARIS12",
"LAST",
};
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -2523,7 +2525,7 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
@@ -2599,7 +2601,7 @@ end:
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank;
@@ -2673,7 +2675,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2700,7 +2702,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2728,7 +2730,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2755,7 +2757,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2783,7 +2785,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2810,7 +2812,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -2838,7 +2840,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
uint32_t *config, no_regs = 0;
@@ -2908,7 +2910,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
int idx, r;
int32_t value;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8cb937b2bfcc..2534adaebe30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ /* Polaris12 */
+ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index fc592c2b0e16..95a568df8551 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 4c992826d2d6..a47628395914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -280,7 +280,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bc70f80260d8..8e35c1ff59e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1511,7 +1511,7 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
@@ -1555,7 +1555,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
+ struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a81dfaeeb8c0..1d564beb0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -65,6 +65,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 69b66b9e7f57..8fec802d3908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -52,6 +52,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b3d62b909f43..2006abbbfb62 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
num_crtc = 5;
break;
default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.audio.num_pins = 6;
break;
default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
int pll;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2822,7 +2826,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@@ -2992,6 +2997,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3101,7 +3107,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index e564442b6393..b4e4ec630e8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1944,9 +1944,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_lock_cursor(crtc, true);
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -1955,8 +1953,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6ce7fb42dbef..584abe834a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2438,8 +2438,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4a5a5ac0ff3..762f8e82ceb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -752,7 +752,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
HRTIMER_MODE_REL);
return HRTIMER_NORESTART;
@@ -772,11 +772,11 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
- ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ DCE_VIRTUAL_VBLANK_PERIOD);
adev->mode_info.crtcs[crtc]->vblank_timer.function =
dce_virtual_vblank_timer_handle;
hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
- ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
DRM_DEBUG("Disable software vsync timer\n");
hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 558640aee15a..b323f5ef64d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -411,244 +411,587 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
- if (adev->asic_type == CHIP_VERDE ||
- adev->asic_type == CHIP_OLAND ||
- adev->asic_type == CHIP_HAINAN) {
+ if (adev->asic_type == CHIP_VERDE) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
break;
case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
break;
case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ continue;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else if (adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+ break;
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ break;
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 23:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
default:
- gb_tile_moden = 0;
- break;
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
@@ -656,239 +999,291 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 0:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 1:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 2:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 3:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 4:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 5:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ case 6:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
+ case 7:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 9:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 10:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 11:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ case 12:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ case 13:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 14:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 15:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 16:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 17:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
+ break;
+ case 18:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
+ break;
+ case 19:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 20:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ TILE_SPLIT(split_equal_to_row_size));
break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 21:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ break;
+ case 22:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 23:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ case 24:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ case 25:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
- default:
- gb_tile_moden = 0;
+ case 26:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 27:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 28:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
break;
+ case 29:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ case 30:
+ gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ break;
+ default:
+ continue;
}
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6324f67bdb1f..373374164bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_STONEY:
chip_name = "stoney";
break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
@@ -3949,8 +3963,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
- amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
- amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+ if (unique_indices[i] != 0) {
+ amdgpu_mm_wreg(adev, temp + i,
+ unique_indices[i] & 0x3FFFF, false);
+ amdgpu_mm_wreg(adev, data + i,
+ unique_indices[i] >> 20, false);
+ }
}
kfree(register_list_format);
@@ -3966,20 +3984,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
- WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
- data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
- WREG32(mmRLC_PG_DELAY, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
+
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
- WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
- }
}
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
@@ -3996,41 +4011,38 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
- WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
- if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_GDS |
- AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
-
- if ((adev->asic_type == CHIP_CARRIZO) ||
- (adev->asic_type == CHIP_STONEY)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
- gfx_v8_0_init_power_gating(adev);
- WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
- if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
- cz_enable_sck_slow_down_on_power_up(adev, true);
- cz_enable_sck_slow_down_on_power_down(adev, true);
- } else {
- cz_enable_sck_slow_down_on_power_up(adev, false);
- cz_enable_sck_slow_down_on_power_down(adev, false);
- }
- if (adev->pg_flags & AMD_PG_SUPPORT_CP)
- cz_enable_cp_power_gating(adev, true);
- else
- cz_enable_cp_power_gating(adev, false);
- } else if (adev->asic_type == CHIP_POLARIS11) {
- gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
+ gfx_v8_0_init_csb(adev);
+ gfx_v8_0_init_save_restore_list(adev);
+ gfx_v8_0_enable_save_restore_machine(adev);
+ gfx_v8_0_init_power_gating(adev);
}
+
}
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
@@ -4098,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4286,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x00000000);
break;
@@ -4667,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
(adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS10)) {
+ (adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4703,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
mqd->cp_hqd_persistent_state = tmp;
if (adev->asic_type == CHIP_STONEY ||
adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5282,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if (adev->asic_type == CHIP_POLARIS11)
+ if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@@ -5339,14 +5356,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
- return 0;
-
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
- cz_update_gfx_cg_power_gating(adev, enable);
+
+ cz_update_gfx_cg_power_gating(adev, enable);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
@@ -5359,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
@@ -5791,25 +5806,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
@@ -5817,43 +5856,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+ pp_state = PP_STATE_LS;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+ pp_support_state |= PP_STATE_SUPPORT_CG;
+ pp_state |= PP_STATE_CG;
+ }
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ pp_support_state = PP_STATE_SUPPORT_LS;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 0daac3a5be79..476bc9f1954b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1170a64a3184..034ace79ed49 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3ed8ad8725b9..c46b0159007d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -43,13 +43,14 @@
static const u32 tahiti_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
+ 0x340c, 0x000000c0, 0x00800040,
+ 0x360c, 0x000000c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
@@ -60,7 +61,7 @@ static const u32 tahiti_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x23a2, 0x01ff1f3f, 0x00000000,
@@ -73,7 +74,11 @@ static const u32 tahiti_golden_registers[] =
0x2234, 0xffffffff, 0x000fff40,
0x2235, 0x0000001f, 0x00000010,
0x0504, 0x20000000, 0x20fffed8,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 tahiti_golden_registers2[] =
@@ -83,16 +88,18 @@ static const u32 tahiti_golden_registers2[] =
static const u32 tahiti_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
0x30c5, 0xffffffff, 0x00000800,
0x30c3, 0xffffffff, 0x800000f4,
- 0x3d2a, 0xffffffff, 0x00000000
+ 0x3d2a, 0x00000008, 0x00000000
};
static const u32 pitcairn_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -110,7 +117,7 @@ static const u32 pitcairn_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x2a00126a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -119,11 +126,16 @@ static const u32 pitcairn_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x32761054,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 pitcairn_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x12011003,
0x3109, 0xffffffff, 0x00601004,
0x311f, 0xffffffff, 0x10102020,
0x3122, 0xffffffff, 0x01000020,
@@ -133,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] =
static const u32 verde_pg_init[] =
{
- 0xd4f, 0xffffffff, 0x40000,
- 0xd4e, 0xffffffff, 0x200010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x7007,
- 0xd4e, 0xffffffff, 0x300010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x400000,
- 0xd4e, 0xffffffff, 0x100010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x120200,
- 0xd4e, 0xffffffff, 0x500010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x1e1e16,
- 0xd4e, 0xffffffff, 0x600010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x171f1e,
- 0xd4e, 0xffffffff, 0x700010ff,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4f, 0xffffffff, 0x0,
- 0xd4e, 0xffffffff, 0x9ff,
- 0xd40, 0xffffffff, 0x0,
- 0xd41, 0xffffffff, 0x10000800,
- 0xd41, 0xffffffff, 0xf,
- 0xd41, 0xffffffff, 0xf,
- 0xd40, 0xffffffff, 0x4,
- 0xd41, 0xffffffff, 0x1000051e,
- 0xd41, 0xffffffff, 0xffff,
- 0xd41, 0xffffffff, 0xffff,
- 0xd40, 0xffffffff, 0x8,
- 0xd41, 0xffffffff, 0x80500,
- 0xd40, 0xffffffff, 0x12,
- 0xd41, 0xffffffff, 0x9050c,
- 0xd40, 0xffffffff, 0x1d,
- 0xd41, 0xffffffff, 0xb052c,
- 0xd40, 0xffffffff, 0x2a,
- 0xd41, 0xffffffff, 0x1053e,
- 0xd40, 0xffffffff, 0x2d,
- 0xd41, 0xffffffff, 0x10546,
- 0xd40, 0xffffffff, 0x30,
- 0xd41, 0xffffffff, 0xa054e,
- 0xd40, 0xffffffff, 0x3c,
- 0xd41, 0xffffffff, 0x1055f,
- 0xd40, 0xffffffff, 0x3f,
- 0xd41, 0xffffffff, 0x10567,
- 0xd40, 0xffffffff, 0x42,
- 0xd41, 0xffffffff, 0x1056f,
- 0xd40, 0xffffffff, 0x45,
- 0xd41, 0xffffffff, 0x10572,
- 0xd40, 0xffffffff, 0x48,
- 0xd41, 0xffffffff, 0x20575,
- 0xd40, 0xffffffff, 0x4c,
- 0xd41, 0xffffffff, 0x190801,
- 0xd40, 0xffffffff, 0x67,
- 0xd41, 0xffffffff, 0x1082a,
- 0xd40, 0xffffffff, 0x6a,
- 0xd41, 0xffffffff, 0x1b082d,
- 0xd40, 0xffffffff, 0x87,
- 0xd41, 0xffffffff, 0x310851,
- 0xd40, 0xffffffff, 0xba,
- 0xd41, 0xffffffff, 0x891,
- 0xd40, 0xffffffff, 0xbc,
- 0xd41, 0xffffffff, 0x893,
- 0xd40, 0xffffffff, 0xbe,
- 0xd41, 0xffffffff, 0x20895,
- 0xd40, 0xffffffff, 0xc2,
- 0xd41, 0xffffffff, 0x20899,
- 0xd40, 0xffffffff, 0xc6,
- 0xd41, 0xffffffff, 0x2089d,
- 0xd40, 0xffffffff, 0xca,
- 0xd41, 0xffffffff, 0x8a1,
- 0xd40, 0xffffffff, 0xcc,
- 0xd41, 0xffffffff, 0x8a3,
- 0xd40, 0xffffffff, 0xce,
- 0xd41, 0xffffffff, 0x308a5,
- 0xd40, 0xffffffff, 0xd3,
- 0xd41, 0xffffffff, 0x6d08cd,
- 0xd40, 0xffffffff, 0x142,
- 0xd41, 0xffffffff, 0x2000095a,
- 0xd41, 0xffffffff, 0x1,
- 0xd40, 0xffffffff, 0x144,
- 0xd41, 0xffffffff, 0x301f095b,
- 0xd40, 0xffffffff, 0x165,
- 0xd41, 0xffffffff, 0xc094d,
- 0xd40, 0xffffffff, 0x173,
- 0xd41, 0xffffffff, 0xf096d,
- 0xd40, 0xffffffff, 0x184,
- 0xd41, 0xffffffff, 0x15097f,
- 0xd40, 0xffffffff, 0x19b,
- 0xd41, 0xffffffff, 0xc0998,
- 0xd40, 0xffffffff, 0x1a9,
- 0xd41, 0xffffffff, 0x409a7,
- 0xd40, 0xffffffff, 0x1af,
- 0xd41, 0xffffffff, 0xcdc,
- 0xd40, 0xffffffff, 0x1b1,
- 0xd41, 0xffffffff, 0x800,
- 0xd42, 0xffffffff, 0x6c9b2000,
- 0xd44, 0xfc00, 0x2000,
- 0xd51, 0xffffffff, 0xfc0,
- 0xa35, 0x00000100, 0x100
+ 0x0d4f, 0xffffffff, 0x40000,
+ 0x0d4e, 0xffffffff, 0x200010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x7007,
+ 0x0d4e, 0xffffffff, 0x300010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x400000,
+ 0x0d4e, 0xffffffff, 0x100010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x120200,
+ 0x0d4e, 0xffffffff, 0x500010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x1e1e16,
+ 0x0d4e, 0xffffffff, 0x600010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x171f1e,
+ 0x0d4e, 0xffffffff, 0x700010ff,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4f, 0xffffffff, 0x0,
+ 0x0d4e, 0xffffffff, 0x9ff,
+ 0x0d40, 0xffffffff, 0x0,
+ 0x0d41, 0xffffffff, 0x10000800,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d41, 0xffffffff, 0xf,
+ 0x0d40, 0xffffffff, 0x4,
+ 0x0d41, 0xffffffff, 0x1000051e,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d41, 0xffffffff, 0xffff,
+ 0x0d40, 0xffffffff, 0x8,
+ 0x0d41, 0xffffffff, 0x80500,
+ 0x0d40, 0xffffffff, 0x12,
+ 0x0d41, 0xffffffff, 0x9050c,
+ 0x0d40, 0xffffffff, 0x1d,
+ 0x0d41, 0xffffffff, 0xb052c,
+ 0x0d40, 0xffffffff, 0x2a,
+ 0x0d41, 0xffffffff, 0x1053e,
+ 0x0d40, 0xffffffff, 0x2d,
+ 0x0d41, 0xffffffff, 0x10546,
+ 0x0d40, 0xffffffff, 0x30,
+ 0x0d41, 0xffffffff, 0xa054e,
+ 0x0d40, 0xffffffff, 0x3c,
+ 0x0d41, 0xffffffff, 0x1055f,
+ 0x0d40, 0xffffffff, 0x3f,
+ 0x0d41, 0xffffffff, 0x10567,
+ 0x0d40, 0xffffffff, 0x42,
+ 0x0d41, 0xffffffff, 0x1056f,
+ 0x0d40, 0xffffffff, 0x45,
+ 0x0d41, 0xffffffff, 0x10572,
+ 0x0d40, 0xffffffff, 0x48,
+ 0x0d41, 0xffffffff, 0x20575,
+ 0x0d40, 0xffffffff, 0x4c,
+ 0x0d41, 0xffffffff, 0x190801,
+ 0x0d40, 0xffffffff, 0x67,
+ 0x0d41, 0xffffffff, 0x1082a,
+ 0x0d40, 0xffffffff, 0x6a,
+ 0x0d41, 0xffffffff, 0x1b082d,
+ 0x0d40, 0xffffffff, 0x87,
+ 0x0d41, 0xffffffff, 0x310851,
+ 0x0d40, 0xffffffff, 0xba,
+ 0x0d41, 0xffffffff, 0x891,
+ 0x0d40, 0xffffffff, 0xbc,
+ 0x0d41, 0xffffffff, 0x893,
+ 0x0d40, 0xffffffff, 0xbe,
+ 0x0d41, 0xffffffff, 0x20895,
+ 0x0d40, 0xffffffff, 0xc2,
+ 0x0d41, 0xffffffff, 0x20899,
+ 0x0d40, 0xffffffff, 0xc6,
+ 0x0d41, 0xffffffff, 0x2089d,
+ 0x0d40, 0xffffffff, 0xca,
+ 0x0d41, 0xffffffff, 0x8a1,
+ 0x0d40, 0xffffffff, 0xcc,
+ 0x0d41, 0xffffffff, 0x8a3,
+ 0x0d40, 0xffffffff, 0xce,
+ 0x0d41, 0xffffffff, 0x308a5,
+ 0x0d40, 0xffffffff, 0xd3,
+ 0x0d41, 0xffffffff, 0x6d08cd,
+ 0x0d40, 0xffffffff, 0x142,
+ 0x0d41, 0xffffffff, 0x2000095a,
+ 0x0d41, 0xffffffff, 0x1,
+ 0x0d40, 0xffffffff, 0x144,
+ 0x0d41, 0xffffffff, 0x301f095b,
+ 0x0d40, 0xffffffff, 0x165,
+ 0x0d41, 0xffffffff, 0xc094d,
+ 0x0d40, 0xffffffff, 0x173,
+ 0x0d41, 0xffffffff, 0xf096d,
+ 0x0d40, 0xffffffff, 0x184,
+ 0x0d41, 0xffffffff, 0x15097f,
+ 0x0d40, 0xffffffff, 0x19b,
+ 0x0d41, 0xffffffff, 0xc0998,
+ 0x0d40, 0xffffffff, 0x1a9,
+ 0x0d41, 0xffffffff, 0x409a7,
+ 0x0d40, 0xffffffff, 0x1af,
+ 0x0d41, 0xffffffff, 0xcdc,
+ 0x0d40, 0xffffffff, 0x1b1,
+ 0x0d41, 0xffffffff, 0x800,
+ 0x0d42, 0xffffffff, 0x6c9b2000,
+ 0x0d44, 0xfc00, 0x2000,
+ 0x0d51, 0xffffffff, 0xfc0,
+ 0x0a35, 0x00000100, 0x100
};
static const u32 verde_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x033f1005,
0x311f, 0xffffffff, 0x10808020,
0x3122, 0xffffffff, 0x00800008,
@@ -269,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] =
static const u32 verde_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
- 0x340c, 0x000300c0, 0x00800040,
- 0x360c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
-
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x0903, 0x000007ff, 0x00000000,
0x0903, 0x000007ff, 0x00000000,
0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xf000001f, 0x00000007,
- 0x2285, 0xffffffff, 0x00ffffff,
+ 0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
-
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x23a2, 0x01ff1f3f, 0x00000000,
- 0x23a3, 0x01ff1f3f, 0x00000000,
0x23a2, 0x01ff1f3f, 0x00000000,
0x23a1, 0x01ff1f3f, 0x00000000,
- 0x23a1, 0x01ff1f3f, 0x00000000,
-
- 0x23a1, 0x01ff1f3f, 0x00000000,
0x2418, 0x0000007f, 0x00000020,
0x2542, 0x00010000, 0x00010000,
- 0x2b01, 0x000003ff, 0x00000003,
- 0x2b05, 0x000003ff, 0x00000003,
0x2b05, 0x000003ff, 0x00000003,
0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00001032,
- 0x2b03, 0xffffffff, 0x00001032,
0x2b03, 0xffffffff, 0x00001032,
0x2235, 0x0000001f, 0x00000010,
- 0x2235, 0x0000001f, 0x00000010,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
@@ -336,7 +329,7 @@ static const u32 oland_golden_registers[] =
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
0x16ec, 0x000000f0, 0x00000070,
- 0x16f9, 0x00200000, 0x50100000,
+ 0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
@@ -345,7 +338,7 @@ static const u32 oland_golden_registers[] =
0x22c4, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000082,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x07ffffff, 0x03000000,
0x2418, 0x0000007f, 0x00000020,
@@ -354,11 +347,16 @@ static const u32 oland_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 oland_golden_rlc_registers[] =
{
+ 0x263e, 0xffffffff, 0x02010002,
0x3109, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
@@ -368,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] =
static const u32 hainan_golden_registers[] =
{
+ 0x17bc, 0x00000030, 0x00000011,
0x2684, 0x00010000, 0x00018208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
- 0x4595, 0xff000fff, 0x00000100,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x3430, 0xff000fff, 0x00000100,
0x340c, 0x000300c0, 0x00800040,
0x3630, 0xff000fff, 0x00000100,
0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
0x0ab9, 0x00073ffe, 0x000022a2,
0x0903, 0x000007ff, 0x00000000,
0x2285, 0xf000001f, 0x00000007,
0x22c9, 0xffffffff, 0x00ffffff,
0x22c4, 0x0000ff0f, 0x00000000,
- 0xa393, 0x07ffffff, 0x4e000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
0xa0d4, 0x3f3f3fff, 0x00000000,
- 0x000c, 0x000000ff, 0x0040,
+ 0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
0x2440, 0x03e00000, 0x03600000,
0x2418, 0x0000007f, 0x00000020,
@@ -392,12 +395,16 @@ static const u32 hainan_golden_registers[] =
0x2b04, 0xffffffff, 0x00000000,
0x2b03, 0xffffffff, 0x00003210,
0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400
+ 0x0570, 0x000c0fc0, 0x000c0400,
+ 0x052c, 0x0fffffff, 0xffffffff,
+ 0x052d, 0x0fffffff, 0x0fffffff,
+ 0x052e, 0x0fffffff, 0x0fffffff,
+ 0x052f, 0x0fffffff, 0x0fffffff
};
static const u32 hainan_golden_registers2[] =
{
- 0x263e, 0xffffffff, 0x02010001
+ 0x263e, 0xffffffff, 0x2011003
};
static const u32 tahiti_mgcg_cgcg_init[] =
@@ -513,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -612,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -709,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -788,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x40b, 0x00000101, 0x00000000,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x040b, 0x00000101, 0x00000000,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
0x1579, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -867,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x21c2, 0xffffffff, 0x00900100,
0x311e, 0xffffffff, 0x00000080,
0x3101, 0xffffffff, 0x0020003f,
- 0xc, 0xffffffff, 0x0000001c,
- 0xd, 0x000f0000, 0x000f0000,
- 0x583, 0xffffffff, 0x00000100,
- 0x409, 0xffffffff, 0x00000100,
- 0x82a, 0xffffffff, 0x00000104,
- 0x993, 0x000c0000, 0x000c0000,
- 0x992, 0x000c0000, 0x000c0000,
- 0xbd4, 0x00000001, 0x00000001,
- 0xc33, 0xc0000fff, 0x00000104,
+ 0x000c, 0xffffffff, 0x0000001c,
+ 0x000d, 0x000f0000, 0x000f0000,
+ 0x0583, 0xffffffff, 0x00000100,
+ 0x0409, 0xffffffff, 0x00000100,
+ 0x082a, 0xffffffff, 0x00000104,
+ 0x0993, 0x000c0000, 0x000c0000,
+ 0x0992, 0x000c0000, 0x000c0000,
+ 0x0bd4, 0x00000001, 0x00000001,
+ 0x0c33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
0x3630, 0xfffffff0, 0x00000100
@@ -1179,6 +1186,8 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = (adev->rev_id == 0) ? 1 :
+ (adev->rev_id == 1) ? 5 : 6;
break;
case CHIP_PITCAIRN:
adev->cg_flags =
@@ -1198,6 +1207,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 20;
break;
case CHIP_VERDE:
@@ -1219,7 +1229,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
//???
- adev->external_rev_id = adev->rev_id + 0x14;
+ adev->external_rev_id = adev->rev_id + 40;
break;
case CHIP_OLAND:
adev->cg_flags =
@@ -1238,6 +1248,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 60;
break;
case CHIP_HAINAN:
adev->cg_flags =
@@ -1255,6 +1266,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
+ adev->external_rev_id = 70;
break;
default:
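
The si.c hunks above set an external_rev_id for each SI variant in si_common_early_init(). Purely as a reference summary (not driver code), the mapping they add can be condensed into one small standalone table; the enum names and helper below are illustrative stand-ins, with the values copied from the hunks.

/* Illustrative summary of the external_rev_id values added above.
 * The chip enum and helper are stand-ins, not amdgpu API. */
#include <stdio.h>

enum si_chip { TAHITI, PITCAIRN, VERDE, OLAND, HAINAN };

static unsigned int si_external_rev_id(enum si_chip chip, unsigned int rev_id)
{
    switch (chip) {
    case TAHITI:
        return (rev_id == 0) ? 1 : (rev_id == 1) ? 5 : 6;
    case PITCAIRN:
        return rev_id + 20;
    case VERDE:
        return rev_id + 40;
    case OLAND:
        return 60;
    case HAINAN:
        return 70;
    }
    return 0;
}

int main(void)
{
    printf("VERDE rev 0 -> external_rev_id %u\n", si_external_rev_id(VERDE, 0));
    return 0;
}
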
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6c65a1a2de79..10bedfac27b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,7 +56,6 @@
#define BIOS_SCRATCH_4 0x5cd
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -3488,19 +3487,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (adev->asic_type == CHIP_VERDE) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
@@ -7687,49 +7673,49 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811) ||
- (adev->pdev->device == 0x6816) ||
- (adev->pdev->device == 0x6817) ||
- (adev->pdev->device == 0x6806))
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811)))
chip_name = "pitcairn_k";
else
chip_name = "pitcairn";
break;
case CHIP_VERDE:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B))
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b))))
chip_name = "verde_k";
else
chip_name = "verde";
break;
case CHIP_OLAND:
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605))
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610)))
chip_name = "oland_k";
else
chip_name = "oland";
break;
case CHIP_HAINAN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6664) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667))) ||
+ ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665)))
chip_name = "hainan_k";
else
chip_name = "hainan";
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a79e283590fb..6de6becce745 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6b3293a1c7b8..5fb0b7f5c065 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -320,11 +320,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
+ /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11))
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
return AMDGPU_VCE_HARVEST_VCE1;
/* Tonga and CZ are dual or single pipe */
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9f771f4ffcb7..c2ac54f11341 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
/*
* Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
default:
break;
}
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
case CHIP_TONGA:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@@ -932,22 +935,73 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_UVD;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x5A;
break;
case CHIP_POLARIS10:
- adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_VCE_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
+ case CHIP_POLARIS12:
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x64;
+ break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
@@ -971,6 +1025,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
}
@@ -996,6 +1051,7 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x61;
@@ -1155,57 +1211,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
static int vi_common_set_clockgating_state_by_smu(void *handle,
enum amd_clockgating_state state)
{
- uint32_t msg_id, pp_state;
+ uint32_t msg_id, pp_state = 0;
+ uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *pp_handle = adev->powerplay.pp_handle;
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_LS,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
-
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG,
- pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+ pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+ if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+ pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_state = PP_STATE_LS;
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+ pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_state |= PP_STATE_CG;
+ }
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ pp_support_state,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+ if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+ }
return 0;
}
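
The rewritten vi_common_set_clockgating_state_by_smu() above builds each SMU message from two pieces: the states a block supports (derived from adev->cg_flags) and the states actually requested, which are cleared entirely on ungate. A minimal standalone sketch of that support/request split, with stand-in constants rather than the powerplay definitions:

/* Minimal sketch of the support/request split used above; the constants and
 * helper are stand-ins, not the powerplay definitions. */
#include <stdbool.h>
#include <stdio.h>

#define SUPPORT_LS (1u << 0)    /* block supports light sleep */
#define SUPPORT_CG (1u << 1)    /* block supports clock gating */
#define STATE_LS   (1u << 0)
#define STATE_CG   (1u << 1)

static void build_cg_request(unsigned int cg_flags, bool ungate,
                             unsigned int *support, unsigned int *state)
{
    *support = 0;
    *state = 0;

    if (cg_flags & SUPPORT_LS) {
        *support |= SUPPORT_LS;
        *state |= STATE_LS;
    }
    if (cg_flags & SUPPORT_CG) {
        *support |= SUPPORT_CG;
        *state |= STATE_CG;
    }
    /* On ungate the supported bits are still advertised, but no state is requested. */
    if (ungate)
        *state = 0;
}

int main(void)
{
    unsigned int support, state;

    build_cg_request(SUPPORT_LS | SUPPORT_CG, false, &support, &state);
    printf("gate:   support=0x%x state=0x%x\n", support, state);
    build_cg_request(SUPPORT_LS | SUPPORT_CG, true, &support, &state);
    printf("ungate: support=0x%x state=0x%x\n", support, state);
    return 0;
}
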
@@ -1237,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_TONGA:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
@@ -1320,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
amdgpu_ip_block_add(adev, &vi_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d1986276dbbd..85f358764bbc 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,7 +23,7 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__
-#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
/*
* Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
CHIP_STONEY,
CHIP_POLARIS10,
CHIP_POLARIS11,
+ CHIP_POLARIS12,
CHIP_LAST,
};
@@ -126,6 +127,10 @@ enum amd_vce_level {
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
+#define AMD_CG_SUPPORT_DRM_LS (1 << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG (1 << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG (1 << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index dc6700aee18f..b03606405a53 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- if (hwmgr->chip_id == CHIP_POLARIS11)
+ if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 26477f0f09dc..6cd1287a7a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index e5812aa456f3..6e618aa20719 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris10_smum_init(smumgr);
break;
default:
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 768087ddb046..a293c8be232c 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -17,12 +17,11 @@
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
- unsigned long addr = (unsigned long)vmf->virtual_address;
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
int ret;
- pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vma, addr, pfn);
+ pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
switch (ret) {
case 0:
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 904beaa932d0..f75c6421db62 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
ast_write32(ast, 0x10000, 0xfc600309);
do {
- ;
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ast_detect_chip(dev, &need_post);
if (ast->chip != AST1180) {
- ast_get_dram_info(dev);
+ ret = ast_get_dram_info(dev);
+ if (ret)
+ goto out_free;
ast->vram_size = ast_get_vram_info(dev);
DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
}
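
The ast_main.c change above gives the register-polling loop an exit path when the PCI channel goes offline, and makes ast_driver_load() propagate that failure instead of spinning forever on a dead device. A generic version of the pattern might look like the sketch below; the callbacks are placeholders, not the AST driver's functions.

/* Generic polling loop with an escape hatch; the callbacks are placeholders,
 * not the AST driver's functions. */
#include <stdbool.h>
#include <stdio.h>

static int poll_until(unsigned int (*read_reg)(void), unsigned int expected,
                      bool (*device_gone)(void))
{
    do {
        if (device_gone())
            return -5;  /* stand-in for -EIO: stop spinning, report failure */
    } while (read_reg() != expected);

    return 0;
}

static unsigned int fake_read(void) { return 0x01; }
static bool fake_gone(void) { return false; }

int main(void)
{
    printf("poll result: %d\n", poll_until(fake_read, 0x01, fake_gone));
    return 0;
}
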
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 583f47f27b36..34f757bcabae 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret)
+ if (ret) {
+ drm_atomic_helper_cleanup_planes(dev, state);
return ret;
+ }
}
/*
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index caa4e4ca616d..bd311c77c254 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
- resource_size_t offset = (unsigned long)vmf->virtual_address -
- vma->vm_start;
+ resource_size_t offset = vmf->address - vma->vm_start;
resource_size_t baddr = map->offset + offset;
struct drm_agp_mem *agpmem;
struct page *page;
@@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!map)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
@@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!dma->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
+ offset = vmf->address - vma->vm_start;
+ /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((void *)dma->pagelist[page_nr]);
@@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!entry->pagelist)
return VM_FAULT_SIGBUS; /* Nothing allocated */
- offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+ offset = vmf->address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset];
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 7d066a91d778..114dddbd297b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = pages[pgoff];
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
out:
switch (ret) {
@@ -759,7 +758,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- flags, pvec + pinned, NULL);
+ flags, pvec + pinned, NULL, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index ea7a18230888..57b81460fec8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff_t page_offset;
int ret;
- page_offset = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
@@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out:
switch (ret) {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 4071b2d1e8cf..8b44fa542562 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -125,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
psbfb->gtt->offset;
page_num = vma_pages(vma);
- address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+ address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 6d1cb6b370b1..527c62917660 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Page relative to the VMA start - we must calculate this ourselves
because vmf->pgoff is the fake GEM offset */
- page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
- >> PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
if (r->stolen)
pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
else
pfn = page_to_pfn(r->pages[page_offset]);
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 5ddde7349fbd..183f5dc1c3f2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -116,6 +116,7 @@ config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
+ depends on VFIO_MDEV && VFIO_MDEV_DEVICE
default n
help
Choose this option if you want to enable KVMGT support for
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 8a46a7f31d53..b123c20e2097 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,6 +5,4 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-
-CFLAGS_kvmgt.o := -Wno-unused-function
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..711c31c8d8b4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return ret;
}
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+ intel_vgpu_reset_ggtt(vgpu);
+
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
+ void *page_addr;
gvt_dbg_core("init gtt\n");
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
+ gvt->gtt.scratch_ggtt_page =
+ alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ if (!gvt->gtt.scratch_ggtt_page) {
+ gvt_err("fail to allocate scratch ggtt page\n");
+ return -ENOMEM;
+ }
+
+ page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+ gvt->gtt.scratch_ggtt_mfn =
+ intel_gvt_hypervisor_virt_to_mfn(page_addr);
+ if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate scratch ggtt page\n");
+ __free_page(gvt->gtt.scratch_ggtt_page);
+ return -EFAULT;
+ }
+
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
+ __free_page(gvt->gtt.scratch_ggtt_page);
+
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ u32 index;
+ u32 offset;
+ u32 num_entries;
+ struct intel_gvt_gtt_entry e;
+
+ memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+ e.type = GTT_TYPE_GGTT_PTE;
+ ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+ e.val64 |= _PAGE_PRESENT;
+
+ index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+ index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
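
intel_vgpu_reset_ggtt() above points every GGTT entry in the vGPU's aperture and hidden ranges at a single scratch page. The core idea — fill a PTE range with one scratch address plus a present bit — is shown in the rough userspace sketch below; the PTE layout, constants, and array are stand-ins for the GVT-g structures.

/* Rough userspace analogue of the scratch-page fill above; the PTE layout,
 * constants, and array are stand-ins, not the GVT-g data structures. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PTE_PRESENT (1ull << 0)

/* Write one scratch entry into every slot of a GGTT-like PTE range. */
static void reset_range(uint64_t *ptes, uint64_t base, uint64_t size,
                        uint64_t scratch_pfn)
{
    uint64_t index = base >> PAGE_SHIFT;
    uint64_t num_entries = size >> PAGE_SHIFT;
    uint64_t entry = (scratch_pfn << PAGE_SHIFT) | PTE_PRESENT;
    uint64_t offset;

    for (offset = 0; offset < num_entries; offset++)
        ptes[index + offset] = entry;
}

int main(void)
{
    uint64_t ptes[16] = { 0 };

    reset_range(ptes, 0x4000, 0x8000, 0xabc);   /* 8 entries starting at index 4 */
    printf("pte[4] = 0x%llx\n", (unsigned long long)ptes[4]);
    return 0;
}
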
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..b315ab3593ec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
+
+ struct page *scratch_ggtt_page;
+ unsigned long scratch_ggtt_mfn;
};
enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b1a7c8dd4b5f..0af17016f33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -164,15 +164,18 @@ struct intel_vgpu {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
- struct device *mdev;
+ struct mdev_device *mdev;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
struct rb_root cache;
struct mutex cache_lock;
- void *vfio_group;
struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
} vdev;
#endif
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index dc0365033157..faaae07ae487 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -39,24 +40,13 @@
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "gvt.h"
-static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
- long npage, int prot, unsigned long *phys_pfn)
-{
- return 0;
-}
-static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
- long npage)
-{
- return 0;
-}
-
static const struct intel_gvt_ops *intel_gvt_ops;
-
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
@@ -90,6 +80,15 @@ struct gvt_dma {
kvm_pfn_t pfn;
};
+static inline bool handle_valid(unsigned long handle)
+{
+ return !!(handle & ~0xff);
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev);
+static void intel_vgpu_release_work(struct work_struct *work);
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -115,12 +114,15 @@ out:
static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
+ kvm_pfn_t pfn;
mutex_lock(&vgpu->vdev.cache_lock);
+
entry = __gvt_cache_find(vgpu, gfn);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ pfn = (entry == NULL) ? 0 : entry->pfn;
- return entry == NULL ? 0 : entry->pfn;
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return pfn;
}
static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -167,9 +169,10 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct device *dev = vgpu->vdev.mdev;
+ struct device *dev = mdev_dev(vgpu->vdev.mdev);
struct gvt_dma *this;
- unsigned long pfn;
+ unsigned long g1;
+ int rc;
mutex_lock(&vgpu->vdev.cache_lock);
this = __gvt_cache_find(vgpu, gfn);
@@ -178,8 +181,9 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
return;
}
- pfn = this->pfn;
- WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ g1 = gfn;
+ rc = vfio_unpin_pages(dev, &g1, 1);
+ WARN_ON(rc != 1);
__gvt_cache_remove_entry(vgpu, this);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -194,15 +198,15 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = vgpu->vdev.mdev;
- unsigned long pfn;
+ struct device *dev = mdev_dev(vgpu->vdev.mdev);
+ unsigned long gfn;
mutex_lock(&vgpu->vdev.cache_lock);
while ((node = rb_first(&vgpu->vdev.cache))) {
dma = rb_entry(node, struct gvt_dma, node);
- pfn = dma->pfn;
+ gfn = dma->gfn;
- kvmgt_unpin_pages(dev, &pfn, 1);
+ vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -226,7 +230,53 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
+static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
static struct attribute *type_attrs[] = {
+ &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
NULL,
};
@@ -322,7 +372,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
if (kvmgt_gfn_is_write_protected(info, gfn))
return;
- p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
if (WARN(!p, "gfn: 0x%llx\n", gfn))
return;
@@ -342,6 +392,739 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_type *type;
+ struct device *pdev;
+ void *gvt;
+
+ pdev = mdev_parent_dev(mdev);
+ gvt = kdev_to_i915(pdev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type) {
+ gvt_err("failed to find type %s to create\n",
+ kobject_name(kobj));
+ return -EINVAL;
+ }
+
+ vgpu = intel_gvt_ops->vgpu_create(gvt, type);
+ if (IS_ERR_OR_NULL(vgpu)) {
+ gvt_err("create intel vgpu failed\n");
+ return -EINVAL;
+ }
+
+ INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+
+ vgpu->vdev.mdev = mdev;
+ mdev_set_drvdata(mdev, vgpu);
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(mdev_dev(mdev)));
+ return 0;
+}
+
+static int intel_vgpu_remove(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ if (handle_valid(vgpu->handle))
+ return -EBUSY;
+
+ intel_gvt_ops->vgpu_destroy(vgpu);
+ return 0;
+}
+
+static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long gfn, end_gfn;
+
+ gfn = unmap->iova >> PAGE_SHIFT;
+ end_gfn = gfn + unmap->size / PAGE_SIZE;
+
+ while (gfn < end_gfn)
+ gvt_cache_remove(vgpu, gfn++);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.group_notifier);
+
+ /* the only action we care about */
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
+ vgpu->vdev.kvm = data;
+
+ if (!data)
+ schedule_work(&vgpu->vdev.release_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_open(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->vdev.iommu_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ goto out;
+ }
+
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
+ &vgpu->vdev.group_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ goto undo_iommu;
+ }
+
+ ret = kvmgt_guest_init(mdev);
+ if (ret)
+ goto undo_group;
+
+ atomic_set(&vgpu->vdev.released, 0);
+ return ret;
+
+undo_group:
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+
+undo_iommu:
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+out:
+ return ret;
+}
+
+static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+{
+ struct kvmgt_guest_info *info;
+ int ret;
+
+ if (!handle_valid(vgpu->handle))
+ return;
+
+ if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ return;
+
+ ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+ WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+ ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+ WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+
+ info = (struct kvmgt_guest_info *)vgpu->handle;
+ kvmgt_guest_exit(info);
+
+ vgpu->vdev.kvm = NULL;
+ vgpu->handle = 0;
+}
+
+static void intel_vgpu_release(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static void intel_vgpu_release_work(struct work_struct *work)
+{
+ struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+ vdev.release_work);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+{
+ u32 start_lo, start_hi;
+ u32 mem_type;
+ int pos = PCI_BASE_ADDRESS_0;
+
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
+ + pos + 4));
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ start_hi = 0;
+ break;
+ }
+
+ return ((u64)start_hi << 32) | start_lo;
+}
+
+static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+ size_t count, loff_t *ppos, bool is_write)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret = -EINVAL;
+
+
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ gvt_err("invalid index: %u\n", index);
+ return -EINVAL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ if (is_write)
+ ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ buf, count);
+ else
+ ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ buf, count);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ if (is_write) {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar0_start + pos, buf, count);
+ } else {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar0_start + pos, buf, count);
+ }
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ case VFIO_PCI_BAR3_REGION_INDEX:
+ case VFIO_PCI_BAR4_REGION_INDEX:
+ case VFIO_PCI_BAR5_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ case VFIO_PCI_ROM_REGION_INDEX:
+ default:
+ gvt_err("unsupported region: %u\n", index);
+ }
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val,
+ sizeof(val), ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+{
+ unsigned int index;
+ u64 virtaddr;
+ unsigned long req_size, pgoff = 0;
+ pgprot_t pg_prot;
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index != VFIO_PCI_BAR2_REGION_INDEX)
+ return -EINVAL;
+
+ pg_prot = vma->vm_page_prot;
+ virtaddr = vma->vm_start;
+ req_size = vma->vm_end - vma->vm_start;
+ pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
+}
+
+static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
+{
+ if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
+ return 1;
+
+ return 0;
+}
+
+static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags,
+ void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ struct eventfd_ctx *trigger;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ gvt_err("eventfd_ctx_fdget failed\n");
+ return PTR_ERR(trigger);
+ }
+ vgpu->vdev.msi_trigger = trigger;
+ }
+
+ return 0;
+}
+
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+ unsigned int index, unsigned int start, unsigned int count,
+ void *data)
+{
+ int (*func)(struct intel_vgpu *vgpu, unsigned int index,
+ unsigned int start, unsigned int count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = intel_vgpu_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = intel_vgpu_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_msi_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vgpu, index, start, count, flags, data);
+}
+
+static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long minsz;
+
+ gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ size_t size;
+ int nr_areas = 1;
+ int cap_type_id;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->cfg_space.bar[info.index].size;
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info.size = gvt_aperture_sz(vgpu->gvt);
+
+ size = sizeof(*sparse) +
+ (nr_areas * sizeof(*sparse->areas));
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+ if (!caps.buf) {
+ kfree(caps.buf);
+ caps.buf = NULL;
+ caps.size = 0;
+ }
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+
+ info.flags = 0;
+ gvt_dbg_core("get region info bar:%d\n", info.index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ gvt_dbg_core("get region info index:%d\n", info.index);
+ break;
+ default:
+ {
+ struct vfio_region_info_cap_type cap_type;
+
+ if (info.index >= VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset =
+ VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->vdev.region[i].size;
+ info.flags = vgpu->vdev.region[i].flags;
+
+ cap_type.type = vgpu->vdev.region[i].type;
+ cap_type.subtype = vgpu->vdev.region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_TYPE,
+ &cap_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ switch (cap_type_id) {
+ case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP,
+ sparse);
+ kfree(sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (caps.size) {
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = intel_vgpu_get_irq_count(vgpu, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret) {
+ gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+ return -EINVAL;
+ }
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+ }
+
+ ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+ kfree(data);
+
+ return ret;
+ } else if (cmd == VFIO_DEVICE_RESET) {
+ intel_gvt_ops->vgpu_reset(vgpu);
+ return 0;
+ }
+
+ return 0;
+}
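For context, the ioctl handler above follows the standard VFIO argsz/minsz contract: the kernel copies in only the header fields it knows about (minsz) and reports back, via argsz, the size it would actually need. A minimal, hypothetical user-space sketch (not part of this patch; device_fd, obtained via VFIO_GROUP_GET_DEVICE_FD, is assumed) of driving these ioctls:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	static int query_vgpu(int device_fd)
	{
		struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
		struct vfio_region_info bar0 = {
			.argsz = sizeof(bar0),
			.index = VFIO_PCI_BAR0_REGION_INDEX,
		};

		if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dev_info))
			return -1;
		printf("regions: %u, irqs: %u\n",
		       dev_info.num_regions, dev_info.num_irqs);

		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &bar0))
			return -1;
		printf("BAR0: size 0x%llx at offset 0x%llx\n",
		       (unsigned long long)bar0.size,
		       (unsigned long long)bar0.offset);
		return 0;
	}

When the BAR2 branch sets VFIO_REGION_INFO_FLAG_CAPS, the reported argsz grows beyond sizeof(struct vfio_region_info); user space is then expected to re-issue the ioctl with a buffer of that size and walk the capability chain starting at cap_offset to locate the sparse-mmap areas.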
+
+static const struct mdev_parent_ops intel_vgpu_ops = {
+ .supported_type_groups = intel_vgpu_type_groups,
+ .create = intel_vgpu_create,
+ .remove = intel_vgpu_remove,
+
+ .open = intel_vgpu_open,
+ .release = intel_vgpu_release,
+
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
+};
+
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
if (!intel_gvt_init_vgpu_type_groups(gvt))
@@ -349,24 +1132,34 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
intel_gvt_ops = ops;
- /* MDEV is not yet available */
- return -ENODEV;
+ return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_device(dev);
}
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -384,13 +1177,23 @@ out:
static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return 0;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -476,6 +1279,81 @@ static int kvmgt_detect_host(void)
return kvmgt_check_guest() ? -ENODEV : 0;
}
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
+{
+ struct intel_vgpu *itr;
+ struct kvmgt_guest_info *info;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!handle_valid(itr->handle))
+ continue;
+
+ info = (struct kvmgt_guest_info *)itr->handle;
+ if (kvm && kvm == info->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvm *kvm;
+
+ vgpu = mdev_get_drvdata(mdev);
+ if (handle_valid(vgpu->handle))
+ return -EEXIST;
+
+ kvm = vgpu->vdev.kvm;
+ if (!kvm || kvm->mm != current->mm) {
+ gvt_err("KVM is required to use Intel vGPU\n");
+ return -ESRCH;
+ }
+
+ if (__kvmgt_vgpu_exist(vgpu, kvm))
+ return -EEXIST;
+
+ info = vzalloc(sizeof(struct kvmgt_guest_info));
+ if (!info)
+ return -ENOMEM;
+
+ vgpu->handle = (unsigned long)info;
+ info->vgpu = vgpu;
+ info->kvm = kvm;
+
+ kvmgt_protect_table_init(info);
+ gvt_cache_init(vgpu);
+
+ info->track_node.track_write = kvmgt_page_track_write;
+ info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(kvm, &info->track_node);
+
+ return 0;
+}
+
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
+{
+ if (!info) {
+ gvt_err("kvmgt_guest_info invalid\n");
+ return false;
+ }
+
+ kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+ kvmgt_protect_table_destroy(info);
+ gvt_cache_destroy(info->vgpu);
+ vfree(info);
+
+ return true;
+}
+
static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
/* nothing to do here */
@@ -489,63 +1367,72 @@ static void kvmgt_detach_vgpu(unsigned long handle)
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct intel_vgpu *vgpu = info->vgpu;
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
- if (vgpu->vdev.msi_trigger)
- return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return false;
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ return 0;
+
+ return -EFAULT;
}
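The msi_trigger eventfd signalled here is handed to the kernel by user space through the VFIO_DEVICE_SET_IRQS branch of the ioctl above. A hypothetical user-space sketch (not part of this patch) of wiring it up; the returned fd is then read or polled to receive vGPU MSIs:

	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	static int enable_msi_eventfd(int device_fd)
	{
		int efd = eventfd(0, EFD_CLOEXEC);
		size_t sz = sizeof(struct vfio_irq_set) + sizeof(int);
		struct vfio_irq_set *set;
		int ret;

		if (efd < 0)
			return -1;

		set = calloc(1, sz);
		if (!set) {
			close(efd);
			return -1;
		}
		set->argsz = sz;
		set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
		set->index = VFIO_PCI_MSI_IRQ_INDEX;
		set->start = 0;
		set->count = 1;
		memcpy(set->data, &efd, sizeof(int));

		ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
		free(set);
		if (ret) {
			close(efd);
			return -1;
		}
		return efd;
	}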
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
unsigned long pfn;
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvmgt_guest_info *info;
+ struct device *dev;
int rc;
+ if (!handle_valid(handle))
+ return INTEL_GVT_INVALID_ADDR;
+
+ info = (struct kvmgt_guest_info *)handle;
pfn = gvt_cache_find(info->vgpu, gfn);
if (pfn != 0)
return pfn;
- rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ pfn = INTEL_GVT_INVALID_ADDR;
+ dev = mdev_dev(info->vgpu->vdev.mdev);
+ rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
- gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
- return 0;
+ gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ return INTEL_GVT_INVALID_ADDR;
}
gvt_cache_add(info->vgpu, gfn, pfn);
return pfn;
}
-static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
{
- unsigned long pfn;
- gfn_t gfn = gpa_to_gfn(gpa);
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ int ret;
+ bool kthread = current->mm == NULL;
- pfn = kvmgt_gfn_to_pfn(handle, gfn);
- if (!pfn)
- return NULL;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
-}
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len, bool write)
-{
- void *hva = NULL;
+ if (kthread)
+ use_mm(kvm->mm);
- hva = kvmgt_gpa_to_hva(handle, gpa);
- if (!hva)
- return -EFAULT;
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
- if (write)
- memcpy(hva, buf, len);
- else
- memcpy(buf, hva, len);
+ if (kthread)
+ unuse_mm(kvm->mm);
- return 0;
+ return ret;
}
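The use_mm()/unuse_mm() pair above is what allows a kernel thread (which has current->mm == NULL) to reach the guest through kvm_read_guest()/kvm_write_guest(), since those ultimately do copy_{to,from}_user() against the vCPU process's address space. A minimal sketch of the same pattern in isolation (illustrative only, assuming the 4.x <linux/mmu_context.h> API):

	#include <linux/mmu_context.h>
	#include <linux/uaccess.h>

	/* Copy from a user address belonging to another task's mm while
	 * running in a kernel thread, by temporarily adopting that mm. */
	static int kthread_copy_from_mm(struct mm_struct *mm, void *dst,
					const void __user *src, size_t len)
	{
		int ret = 0;

		use_mm(mm);		/* borrow the target address space */
		if (copy_from_user(dst, src, len))
			ret = -EFAULT;
		unuse_mm(mm);		/* and give it back */

		return ret;
	}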
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..81cd921770c6 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 56002a52936d..243224aeabf8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0dcaf35b429..3dd7fc662859 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -174,21 +174,35 @@ static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
+ char *vaddr;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return ERR_PTR(-EINVAL);
+ /* Always aligning to the object size allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ obj->base.size,
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(page)) {
+ st = ERR_CAST(page);
+ goto err_phys;
+ }
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -202,34 +216,44 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ if (!st) {
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
+ }
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
- sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
+ obj->phys_handle = phys;
+ return st;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
return st;
}
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+ struct sg_table *pages,
+ bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ if (needs_clflush &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
drm_clflush_sg(pages);
@@ -241,7 +265,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -272,12 +296,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
- drm_pci_free(obj->base.dev, obj->phys_handle);
i915_gem_object_unpin_pages(obj);
}
@@ -538,15 +563,13 @@ int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
- drm_dma_handle_t *phys;
int ret;
- if (obj->phys_handle) {
- if ((unsigned long)obj->phys_handle->vaddr & (align -1))
- return -EBUSY;
+ if (align > obj->base.size)
+ return -EINVAL;
+ if (obj->ops == &i915_gem_phys_ops)
return 0;
- }
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
@@ -562,12 +585,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
- /* create a new object */
- phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
- if (!phys)
- return -ENOMEM;
-
- obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_pin_pages(obj);
@@ -1796,8 +1813,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -2217,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@@ -2290,15 +2306,6 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
- return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
- return 0;
-#endif
-}
-
static void i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
@@ -2308,7 +2315,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
if (orig_st->nents == orig_st->orig_nents)
return;
- if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return;
new_sg = new_st.sgl;
@@ -2327,7 +2334,8 @@ static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int page_count, i;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
@@ -2345,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_size();
+ max_segment = swiotlb_max_segment();
if (!max_segment)
max_segment = rounddown(UINT_MAX, PAGE_SIZE);
@@ -2353,7 +2361,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (st == NULL)
return ERR_PTR(-ENOMEM);
- page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
@@ -2412,8 +2420,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret)
- goto err_pages;
+ if (ret) {
+ /* DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries; asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&dev_priv->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
@@ -2696,6 +2721,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
struct intel_timeline *timeline;
+ unsigned long flags;
bool ring_hung;
if (engine->irq_seqno_barrier)
@@ -2731,13 +2757,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock(&timeline->lock);
+
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
- timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
list_for_each_entry(request, &timeline->requests, link)
reset_request(request);
+
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
rcu_assign_pointer(active->request, request);
}
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active: the active tracker
+ * @fn: the routine called when the request is retired
+ * @mutex: struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+ i915_gem_retire_fn fn,
+ struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+ active->retire = fn ?: i915_gem_retire_noop;
+}
+
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ebaa941c83af..abc78bbfc1dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
*/
- if (start < 4096 && (IS_GEN8(dev_priv) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+ if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 107ddf51065e..d068af2ec3a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL);
+ pvec + pinned, NULL, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 47590ab08d7e..3df8d3dd31cd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8405b5a367d7..7e3545f65257 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,14 +46,20 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, so we use the proper names here to avoid confusion when
+ * reading the code.
+ */
enum mipi_seq {
MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cf5cff7b03b8..3dc8724df400 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.vco = 0;
}
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 val;
-
- /* inform PCU we want to change CDCLK */
- val = SKL_CDCLK_PREPARE_FOR_CHANGE;
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
- return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
u32 freq_select, pcu_ack;
+ int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
- if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
- DRM_ERROR("failed to inform PCU about cdclk change\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
return;
}
@@ -16802,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state = crtc->config;
- int pixclk = 0;
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16814,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.enabled = crtc_state->base.enable;
crtc->active = crtc_state->base.active;
- if (crtc_state->base.active) {
+ if (crtc_state->base.active)
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc_state);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->base.adjusted_mode.crtc_clock;
- else
- WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- }
-
- dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
readout_plane_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16903,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ int pixclk = 0;
+
crtc->base.hwmode = crtc->config->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16930,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ pixclk = ilk_pipe_pixel_rate(crtc->config);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+ else
+ WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
+ dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 90283edcafba..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp);
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
return 0;
}
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
port_name(port), pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -4014,8 +4015,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* FIXME: we need to synchronize this sort of stuff with hardware
- * readout */
- if (WARN_ON_ONCE(!intel_dp->lane_count))
+ * readout. Currently fast link training doesn't work on boot-up. */
+ if (!intel_dp->lane_count)
return;
/* if link training is requested we should perform it always */
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * edp_panel_vdd_on() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ironlake_get_pp_control(intel_dp);
+
+ WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ I915_WRITE(regs.pp_ctrl, pp);
+ }
+
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 0d8ff0034b88..47cd1b20fb3e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->sb_lock);
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+ CHV_GPIO_GPIOTXSTATE(value));
mutex_unlock(&dev_priv->sb_lock);
}
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
- [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
- [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0a09024d6ca3..d4961fa20c73 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- ret = logical_ring_init(engine);
- if (ret) {
- lrc_destroy_wa_ctx_obj(engine);
- }
-
- return ret;
+ return logical_ring_init(engine);
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
- overlay->last_flip.retire = retire;
+ i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+ &overlay->i915->drm.struct_mutex);
i915_gem_active_set(&overlay->last_flip, req);
i915_add_request(req);
}
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma->obj, new_bo,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
+ init_request_active(&overlay->last_flip, NULL);
+
regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_unpin_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d67974eb127a..ae2c0bb4b2e8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
- uint32_t temp = GEN9_SAGV_DISABLE;
-
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- &temp);
- if (ret)
- return ret;
- else
- return temp & GEN9_SAGV_IS_DISABLED;
-}
-
int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
- int ret, result;
+ int ret;
if (!intel_has_sagv(dev_priv))
return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret == -ETIMEDOUT) {
- DRM_ERROR("Request to disable SAGV timed out\n");
- return -ETIMEDOUT;
- }
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
- } else if (result < 0) {
- DRM_ERROR("Failed to disable the SAGV\n");
- return result;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ return ret;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
return 0;
}
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ u32 val = request;
+
+ *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+ return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error, or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms; if that times out, it is retried for another 10 ms
+ * with preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+ &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee how soon its passed condition is first
+ * evaluated, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround, retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 10 ms to
+ * account for interrupts that could reduce the number of these
+ * requests.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 10);
+ preempt_enable();
+
+out:
+ return ret ? ret : status;
+#undef COND
+}
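A usage fragment, mirroring the real callers converted elsewhere in this patch (skl_set_cdclk() and intel_disable_sagv()); dev_priv and ret are assumed to be in scope, and rps.hw_lock is taken around the call as the assertion above requires:

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret)
		DRM_DEBUG_DRIVER("PCODE request failed: %d\n", ret);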
+
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
/*
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 7b488e2793d9..c6be70686b4a 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- /* Per platform default */
- if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- i915.enable_psr = 1;
- else
- i915.enable_psr = 0;
- }
+ /* Per platform default: all disabled. */
+ if (i915.enable_psr == -1)
+ i915.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 356c662ad453..87b4af092d54 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d7be0d94ba4d..0bffd3f0c15d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,7 +62,7 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
- ktime_set(0, NSEC_PER_MSEC),
+ NSEC_PER_MSEC,
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
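This hunk, like the nouveau_fence.c and tilcdc_crtc.c hunks later in this patch, drops ktime_set(0, nsecs) in favour of a plain nanosecond value; with ktime_t being a simple s64 count of nanoseconds the two forms are equivalent. A minimal illustration (not part of the patch):

	ktime_t a = ktime_set(0, NSEC_PER_MSEC);	/* 0 s + 1000000 ns */
	ktime_t b = NSEC_PER_MSEC;			/* 1000000 ns directly */
	/* a == b */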
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 4942ca090b46..7890e30eb584 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
struct drm_rect clip = { 0, };
+ if (!state->crtc)
+ return 0;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index d836b2274531..f7c870172220 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -38,6 +38,11 @@
* - TV Panel encoding via ENCT
*/
+/* HHI Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
+
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
.mode_tag = MESON_VENC_MODE_CVBS_PAL,
.hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
+ /* Disable CVBS VDAC */
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+
+ /* Power Down Dacs */
+ writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ /* Disable HDMI PHY */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+
+ /* Disable HDMI */
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
/* Disable all encoders */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index c809c085fd78..a2bcc70a03ef 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
/* Disable CVBS VDAC */
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a18126150e11..14ff87686a36 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /*
+ * Mask the wptr value that we calculate so that it fits in the HW range.
+ * This accounts for the possibility that the last command fit exactly
+ * into the ringbuffer and rb->next hasn't wrapped to zero yet.
+ */
+ wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
/* ensure writes to ringbuffer have hit system memory: */
mb();
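The masking above relies on the ring size being a power of two, which msm_ringbuffer_new() starts enforcing later in this patch. A small hypothetical helper with made-up numbers to make the wrap-around concrete:

	/* With a 4096-byte ring (1024 dwords) the mask is 1023, so a wptr of
	 * exactly 1024 (ring just filled, rb->next not yet wrapped) maps back
	 * to 0, while 1023 stays 1023. */
	static inline uint32_t ring_wrap(uint32_t wptr, uint32_t size_bytes)
	{
		return wptr & ((size_bytes / 4) - 1);
	}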
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index cd06cfd94687..d8bc59c7e261 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 166e84e4f0d4..489676568a10 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
- int ret;
+ int ret = 0;
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
- return -EFAULT;
+ goto out;
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
- return ret;
+ goto out;
if (valid)
continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}
+out:
msm_gem_put_vaddr_locked(&obj->base);
- return 0;
+ return ret;
}
static void submit_cleanup(struct msm_gem_submit *submit)
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index f326cf6a32e6..67b34e069abf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+ if (WARN_ON(!is_power_of_2(size)))
+ return ERR_PTR(-EINVAL);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index f2f348f0160c..a6126c93f215 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -330,7 +330,7 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
__set_current_state(intr ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
- kt = ktime_set(0, sleep_time);
+ kt = sleep_time;
schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index d4e1e11466f8..4a90c690f09e 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -398,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj,
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
omap_gem_cpu_sync(obj, pgoff);
@@ -409,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj,
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
}
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -427,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj,
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
- void __user *vaddr;
+ unsigned long vaddr;
int i, ret, slots;
/*
@@ -447,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj,
const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = ((unsigned long)vmf->virtual_address -
- vma->vm_start) >> PAGE_SHIFT;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/*
* Actual address we start mapping at is rounded down to previous slot
@@ -459,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* figure out buffer width in slots */
slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
- vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
@@ -503,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj,
pfn = entry->paddr >> PAGE_SHIFT;
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, (unsigned long)vaddr,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 21b6732425c5..c829cfb02fc4 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
- if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
+ tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ad4d7b8b8322..e8a38d296855 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -1657,9 +1656,6 @@ static int si_init_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- /* XXX: figure out which Tahitis need the new ucode */
- if (0)
- new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1667,9 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->device == 0x6810) ||
- (rdev->pdev->device == 0x6811) ||
- (rdev->pdev->device == 0x6816) ||
- (rdev->pdev->device == 0x6817) ||
- (rdev->pdev->device == 0x6806))
+ if ((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811)))
new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1682,15 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B))
+ if (((rdev->pdev->device == 0x6820) &&
+ ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83))) ||
+ ((rdev->pdev->device == 0x6821) &&
+ ((rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87))) ||
+ ((rdev->pdev->revision == 0x87) &&
+ ((rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682b))))
new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1703,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605))
+ if (((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6600) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605) ||
+ (rdev->pdev->device == 0x6610))) ||
+ ((rdev->pdev->revision == 0x83) &&
+ (rdev->pdev->device == 0x6610)))
new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,12 +1721,15 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0xC3) ||
- (rdev->pdev->device == 0x6664) ||
- (rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667))
+ if (((rdev->pdev->revision == 0x81) &&
+ (rdev->pdev->device == 0x6660)) ||
+ ((rdev->pdev->revision == 0x83) &&
+ ((rdev->pdev->device == 0x6660) ||
+ (rdev->pdev->device == 0x6663) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))) ||
+ ((rdev->pdev->revision == 0xc3) &&
+ (rdev->pdev->device == 0x6665)))
new_smc = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8b5e697f2549..13ba73fd9b68 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (rdev->family == CHIP_VERDE) {
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index c08e5279eeac..7d853e6b5ff0 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -452,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!bo->pages)
return VM_FAULT_SIGBUS;
- offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+ offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
- err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ err = vm_insert_page(vma, vmf->address, page);
switch (err) {
case -EAGAIN:
case 0:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 9942b0577d6e..6dfdb145f3bb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -539,7 +539,7 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
}
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = ktime_set(0, 0);
+ tilcdc_crtc->last_vblank = 0;
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- uint32_t stat;
+ uint32_t stat, reg;
stat = tilcdc_read_irqstatus(dev);
tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
__func__, stat);
tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
- queue_work(system_wq, &tilcdc_crtc->recover_work);
- if (priv->rev == 1)
+ if (priv->rev == 1) {
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
+ if (reg & LCDC_RASTER_ENABLE) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
- LCDC_V1_SYNC_LOST_INT_ENA);
- else
+ LCDC_RASTER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_RASTER_ENABLE);
+ }
+ } else {
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev,
+ "%s(0x%08x): Sync lost flood detected, recovering",
+ __func__, stat);
+ queue_work(system_wq,
+ &tilcdc_crtc->recover_work);
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- tilcdc_crtc->sync_lost_count = 0;
+ tilcdc_crtc->sync_lost_count = 0;
+ }
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4748aedc933a..68ef993ab431 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- unsigned long address = (unsigned long)vmf->virtual_address;
+ unsigned long address = vmf->address;
int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 818e70712b18..3c0c4bd3f750 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned int page_offset;
int ret = 0;
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (!obj->pages)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ ret = vm_insert_page(vma, vmf->address, page);
switch (ret) {
case -EAGAIN:
case 0:
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f36c14729b55..477e07f0ecb6 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ unsigned long vaddr = vmf->address;
struct page *page;
page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index cb75f0663ba0..11288ffa4af6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -88,8 +88,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
(vgdev, handle, 0,
cpu_to_le32(plane->state->src_w >> 16),
cpu_to_le32(plane->state->src_h >> 16),
- plane->state->src_x >> 16,
- plane->state->src_y >> 16, NULL);
+ cpu_to_le32(plane->state->src_x >> 16),
+ cpu_to_le32(plane->state->src_y >> 16), NULL);
}
} else {
handle = 0;
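
Note: the plane hunk above is an endianness fix — the virtio command fields are little-endian on the wire, so src_x/src_y need the same cpu_to_le32() treatment already applied to src_w/src_h. A minimal sketch of the pattern (illustrative only; the command struct below is an assumed stand-in, not the real virtio-gpu layout):

/* Illustrative sketch: CPU-endian values into a little-endian command. */
struct example_2d_cmd {
        __le32 x, y, width, height;     /* assumed stand-in for the wire format */
};

static void example_fill_cmd(struct example_2d_cmd *cmd,
                             u32 x, u32 y, u32 w, u32 h)
{
        cmd->x      = cpu_to_le32(x);   /* no-op on LE hosts, byte swap on BE */
        cmd->y      = cpu_to_le32(y);
        cmd->width  = cpu_to_le32(w);
        cmd->height = cpu_to_le32(h);
}
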
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 974f9410474b..43ea0dc957d2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -109,8 +109,10 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
spin_lock(&vgdev->free_vbufs_lock);
for (i = 0; i < count; i++) {
- if (WARN_ON(list_empty(&vgdev->free_vbufs)))
+ if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
+ spin_unlock(&vgdev->free_vbufs_lock);
return;
+ }
vbuf = list_first_entry(&vgdev->free_vbufs,
struct virtio_gpu_vbuffer, list);
list_del(&vbuf->list);
@@ -295,6 +297,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
+ __releases(&vgdev->ctrlq.qlock)
+ __acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
struct scatterlist *sgs[3], vcmd, vout, vresp;
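
Note: two things happen in the virtgpu_vq hunks above — the early return in virtio_gpu_free_vbufs() now drops free_vbufs_lock before bailing out, and the __releases()/__acquires() annotations tell sparse that virtio_gpu_queue_ctrl_buffer_locked() may temporarily drop ctrlq.qlock even though it is entered and left with the lock held. A minimal sketch of the balanced-unlock pattern (illustrative only; all types are assumed):

/* Illustrative sketch: every exit path must release the lock it took. */
struct example_buf {
        struct list_head list;
};

struct example_dev {
        spinlock_t free_lock;
        struct list_head free_list;
        int count;
};

static void example_drain(struct example_dev *dev)
{
        struct example_buf *buf;
        int i;

        spin_lock(&dev->free_lock);
        for (i = 0; i < dev->count; i++) {
                if (WARN_ON(list_empty(&dev->free_list))) {
                        spin_unlock(&dev->free_lock);   /* don't leak the lock */
                        return;
                }
                buf = list_first_entry(&dev->free_list,
                                       struct example_buf, list);
                list_del(&buf->list);
        }
        spin_unlock(&dev->free_lock);
}
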
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4070b7386e9d..1aeb80e52424 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -785,6 +785,11 @@ config HID_SUNPLUS
config HID_RMI
tristate "Synaptics RMI4 device support"
depends on HID
+ select RMI4_CORE
+ select RMI4_F03
+ select RMI4_F11
+ select RMI4_F12
+ select RMI4_F30
---help---
Support for Synaptics RMI4 touchpads.
Say Y here if you have a Synaptics RMI4 touchpads over i2c-hid or usbhid
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 1f6d0c000381..538ff697a4cf 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -43,7 +43,6 @@
*/
#define DRIVER_DESC "HID core driver"
-#define DRIVER_LICENSE "GPL"
int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
@@ -724,13 +723,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
hid->group = HID_GROUP_SENSOR_HUB;
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
- (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
+ hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -826,7 +819,8 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_WACOM;
break;
case USB_VENDOR_ID_SYNAPTICS:
- if (hid->group == HID_GROUP_GENERIC)
+ if (hid->group == HID_GROUP_GENERIC ||
+ hid->group == HID_GROUP_MULTITOUCH_WIN_8)
if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
&& (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
/*
@@ -1887,6 +1881,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
#if IS_ENABLED(CONFIG_HID_MAYFLASH)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
@@ -1986,12 +1983,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2127,6 +2118,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ }
};
@@ -2315,7 +2307,7 @@ __ATTRIBUTE_GROUPS(hid_dev);
static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct hid_device *hdev = to_hid_device(dev);
+ struct hid_device *hdev = to_hid_device(dev);
if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
hdev->bus, hdev->vendor, hdev->product))
@@ -2868,5 +2860,5 @@ module_exit(hid_exit);
MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8943a58c626d..86c95d30ac80 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,7 +323,8 @@
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
-#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -630,6 +631,7 @@
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
+#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -726,12 +728,6 @@
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
-#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
index d9090765a6e5..03f10516131d 100644
--- a/drivers/hid/hid-mf.c
+++ b/drivers/hid/hid-mf.c
@@ -6,12 +6,14 @@
*
* Tested with:
* 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ * 0079:1803 "DragonRise Inc. Mayflash Wireless Sensor DolphinBar"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ * 0079:1844 "DragonRise Inc. Mayflash GameCube Game Controller Adapter (v04)"
*
* The following adapters probably work too, but need to be tested:
* 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
- * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
*
- * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ * Copyright (c) 2016-2017 Marcel Hasler <mahasler@gmail.com>
*/
/*
@@ -125,8 +127,8 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
- /* Split device into four inputs */
- hid->quirks |= HID_QUIRK_MULTI_INPUT;
+ /* Apply quirks as needed */
+ hid->quirks |= id->driver_data;
error = hid_parse(hid);
if (error) {
@@ -151,7 +153,14 @@ static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
}
static const struct hid_device_id mf_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
+ .driver_data = 0 }, /* No quirk required */
{ }
};
MODULE_DEVICE_TABLE(hid, mf_devices);
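
Note: hid-mf now carries the per-device quirk in hid_device_id.driver_data and applies it in probe, instead of hard-coding HID_QUIRK_MULTI_INPUT; this is what lets the GAMECUBE2 adapter bind with no quirk at all. A minimal sketch of the table-plus-probe pattern (illustrative only; the vendor/product IDs are placeholders):

/* Illustrative sketch: quirks selected per device ID, applied in probe. */
static const struct hid_device_id example_devices[] = {
        { HID_USB_DEVICE(0x0079, 0x1801),       /* placeholder IDs */
          .driver_data = HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(0x0079, 0x1844),
          .driver_data = 0 },                   /* no quirk needed */
        { }
};

static int example_probe(struct hid_device *hid, const struct hid_device_id *id)
{
        int ret;

        hid->quirks |= id->driver_data;         /* whatever the table asked for */

        ret = hid_parse(hid);
        if (ret)
                return ret;

        return hid_hw_start(hid, HID_CONNECT_DEFAULT);
}
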
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 74b7b84a0420..96e7d3231d2f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -274,18 +274,6 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
- .driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
- .driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index be89bcbf6a71..5b40c2614599 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -14,11 +14,14 @@
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/rmi.h>
#include "hid-ids.h"
#define RMI_MOUSE_REPORT_ID 0x01 /* Mouse emulation Report */
@@ -33,9 +36,6 @@
#define RMI_READ_DATA_PENDING 1
#define RMI_STARTED 2
-#define RMI_SLEEP_NORMAL 0x0
-#define RMI_SLEEP_DEEP_SLEEP 0x1
-
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
@@ -54,25 +54,12 @@ enum rmi_mode_type {
RMI_MODE_NO_PACKED_ATTN_REPORTS = 2,
};
-struct rmi_function {
- unsigned page; /* page of the function */
- u16 query_base_addr; /* base address for queries */
- u16 command_base_addr; /* base address for commands */
- u16 control_base_addr; /* base address for controls */
- u16 data_base_addr; /* base address for datas */
- unsigned int interrupt_base; /* cross-function interrupt number
- * (uniq in the device)*/
- unsigned int interrupt_count; /* number of interrupts */
- unsigned int report_size; /* size of a report */
- unsigned long irq_mask; /* mask of the interrupts
- * (to be applied against ATTN IRQ) */
-};
-
/**
* struct rmi_data - stores information for hid communication
*
* @page_mutex: Locks current page to avoid changing pages in unexpected ways.
* @page: Keeps track of the current virtual page
+ * @xport: transport device to be registered with the RMI4 core.
*
* @wait: Used for waiting for read data
*
@@ -84,26 +71,18 @@ struct rmi_function {
*
* @flags: flags for the current device (started, reading, etc...)
*
- * @f11: placeholder of internal RMI function F11 description
- * @f30: placeholder of internal RMI function F30 description
- *
- * @max_fingers: maximum finger count reported by the device
- * @max_x: maximum x value reported by the device
- * @max_y: maximum y value reported by the device
- *
- * @gpio_led_count: count of GPIOs + LEDs reported by F30
- * @button_count: actual physical buttons count
- * @button_mask: button mask used to decode GPIO ATTN reports
- * @button_state_mask: pull state of the buttons
- *
- * @input: pointer to the kernel input device
- *
* @reset_work: worker which will be called in case of a mouse report
* @hdev: pointer to the struct hid_device
+ *
+ * @device_flags: flags which describe the device
+ *
+ * @domain: the IRQ domain allocated for this RMI4 device
+ * @rmi_irq: the irq that will be used to generate events to rmi-core
*/
struct rmi_data {
struct mutex page_mutex;
int page;
+ struct rmi_transport_dev xport;
wait_queue_head_t wait;
@@ -115,34 +94,13 @@ struct rmi_data {
unsigned long flags;
- struct rmi_function f01;
- struct rmi_function f11;
- struct rmi_function f30;
-
- unsigned int max_fingers;
- unsigned int max_x;
- unsigned int max_y;
- unsigned int x_size_mm;
- unsigned int y_size_mm;
- bool read_f11_ctrl_regs;
- u8 f11_ctrl_regs[RMI_F11_CTRL_REG_COUNT];
-
- unsigned int gpio_led_count;
- unsigned int button_count;
- unsigned long button_mask;
- unsigned long button_state_mask;
-
- struct input_dev *input;
-
struct work_struct reset_work;
struct hid_device *hdev;
unsigned long device_flags;
- unsigned long firmware_id;
- u8 f01_ctrl0;
- u8 interrupt_enable_mask;
- bool restore_interrupt_mask;
+ struct irq_domain *domain;
+ int rmi_irq;
};
#define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -220,10 +178,11 @@ static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
return ret;
}
-static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf,
- const int len)
+static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
int ret;
int bytes_read;
int bytes_needed;
@@ -292,15 +251,11 @@ exit:
return ret;
}
-static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf)
-{
- return rmi_read_block(hdev, addr, buf, 1);
-}
-
-static int rmi_write_block(struct hid_device *hdev, u16 addr, void *buf,
- const int len)
+static int rmi_hid_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
int ret;
mutex_lock(&data->page_mutex);
@@ -332,62 +287,20 @@ exit:
return ret;
}
-static inline int rmi_write(struct hid_device *hdev, u16 addr, void *buf)
-{
- return rmi_write_block(hdev, addr, buf, 1);
-}
-
-static void rmi_f11_process_touch(struct rmi_data *hdata, int slot,
- u8 finger_state, u8 *touch_data)
-{
- int x, y, wx, wy;
- int wide, major, minor;
- int z;
-
- input_mt_slot(hdata->input, slot);
- input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER,
- finger_state == 0x01);
- if (finger_state == 0x01) {
- x = (touch_data[0] << 4) | (touch_data[2] & 0x0F);
- y = (touch_data[1] << 4) | (touch_data[2] >> 4);
- wx = touch_data[3] & 0x0F;
- wy = touch_data[3] >> 4;
- wide = (wx > wy);
- major = max(wx, wy);
- minor = min(wx, wy);
- z = touch_data[4];
-
- /* y is inverted */
- y = hdata->max_y - y;
-
- input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x);
- input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y);
- input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide);
- input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z);
- input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
- input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
- }
-}
-
static int rmi_reset_attn_mode(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
int ret;
ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
if (ret)
return ret;
- if (data->restore_interrupt_mask) {
- ret = rmi_write(hdev, data->f01.control_base_addr + 1,
- &data->interrupt_enable_mask);
- if (ret) {
- hid_err(hdev, "can not write F01 control register\n");
- return ret;
- }
- }
+ if (test_bit(RMI_STARTED, &data->flags))
+ ret = rmi_dev->driver->reset_handler(rmi_dev);
- return 0;
+ return ret;
}
static void rmi_reset_work(struct work_struct *work)
@@ -399,102 +312,22 @@ static void rmi_reset_work(struct work_struct *work)
rmi_reset_attn_mode(hdata->hdev);
}
-static inline int rmi_schedule_reset(struct hid_device *hdev)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- return schedule_work(&hdata->reset_work);
-}
-
-static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data,
- int size)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- int offset;
- int i;
-
- if (!(irq & hdata->f11.irq_mask) || size <= 0)
- return 0;
-
- offset = (hdata->max_fingers >> 2) + 1;
- for (i = 0; i < hdata->max_fingers; i++) {
- int fs_byte_position = i >> 2;
- int fs_bit_position = (i & 0x3) << 1;
- int finger_state = (data[fs_byte_position] >> fs_bit_position) &
- 0x03;
- int position = offset + 5 * i;
-
- if (position + 5 > size) {
- /* partial report, go on with what we received */
- printk_once(KERN_WARNING
- "%s %s: Detected incomplete finger report. Finger reports may occasionally get dropped on this platform.\n",
- dev_driver_string(&hdev->dev),
- dev_name(&hdev->dev));
- hid_dbg(hdev, "Incomplete finger report\n");
- break;
- }
-
- rmi_f11_process_touch(hdata, i, finger_state, &data[position]);
- }
- input_mt_sync_frame(hdata->input);
- input_sync(hdata->input);
- return hdata->f11.report_size;
-}
-
-static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data,
- int size)
+static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- int i;
- int button = 0;
- bool value;
+ struct rmi_device *rmi_dev = hdata->xport.rmi_dev;
+ unsigned long flags;
- if (!(irq & hdata->f30.irq_mask))
+ if (!(test_bit(RMI_STARTED, &hdata->flags)))
return 0;
- if (size < (int)hdata->f30.report_size) {
- hid_warn(hdev, "Click Button pressed, but the click data is missing\n");
- return 0;
- }
+ local_irq_save(flags);
- for (i = 0; i < hdata->gpio_led_count; i++) {
- if (test_bit(i, &hdata->button_mask)) {
- value = (data[i / 8] >> (i & 0x07)) & BIT(0);
- if (test_bit(i, &hdata->button_state_mask))
- value = !value;
- input_event(hdata->input, EV_KEY, BTN_LEFT + button++,
- value);
- }
- }
- return hdata->f30.report_size;
-}
-
-static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- unsigned long irq_mask = 0;
- unsigned index = 2;
+ rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
- if (!(test_bit(RMI_STARTED, &hdata->flags)))
- return 0;
+ generic_handle_irq(hdata->rmi_irq);
- irq_mask |= hdata->f11.irq_mask;
- irq_mask |= hdata->f30.irq_mask;
-
- if (data[1] & ~irq_mask)
- hid_dbg(hdev, "unknown intr source:%02lx %s:%d\n",
- data[1] & ~irq_mask, __FILE__, __LINE__);
-
- if (hdata->f11.interrupt_base < hdata->f30.interrupt_base) {
- index += rmi_f11_input_event(hdev, data[1], &data[index],
- size - index);
- index += rmi_f30_input_event(hdev, data[1], &data[index],
- size - index);
- } else {
- index += rmi_f30_input_event(hdev, data[1], &data[index],
- size - index);
- index += rmi_f11_input_event(hdev, data[1], &data[index],
- size - index);
- }
+ local_irq_restore(flags);
return 1;
}
@@ -568,7 +401,7 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- rmi_schedule_reset(hdev);
+ schedule_work(&data->reset_work);
return 1;
}
@@ -576,637 +409,71 @@ static int rmi_event(struct hid_device *hdev, struct hid_field *field,
}
#ifdef CONFIG_PM
-static int rmi_set_sleep_mode(struct hid_device *hdev, int sleep_mode)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
- u8 f01_ctrl0;
-
- f01_ctrl0 = (data->f01_ctrl0 & ~0x3) | sleep_mode;
-
- ret = rmi_write(hdev, data->f01.control_base_addr,
- &f01_ctrl0);
- if (ret) {
- hid_err(hdev, "can not write sleep mode\n");
- return ret;
- }
-
- return 0;
-}
-
static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
{
struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
- u8 buf[RMI_F11_CTRL_REG_COUNT];
-
- if (!(data->device_flags & RMI_DEVICE))
- return 0;
-
- ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
- RMI_F11_CTRL_REG_COUNT);
- if (ret)
- hid_warn(hdev, "can not read F11 control registers\n");
- else
- memcpy(data->f11_ctrl_regs, buf, RMI_F11_CTRL_REG_COUNT);
-
-
- if (!device_may_wakeup(hdev->dev.parent))
- return rmi_set_sleep_mode(hdev, RMI_SLEEP_DEEP_SLEEP);
-
- return 0;
-}
-
-static int rmi_post_reset(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
int ret;
if (!(data->device_flags & RMI_DEVICE))
return 0;
- ret = rmi_reset_attn_mode(hdev);
+ ret = rmi_driver_suspend(rmi_dev, false);
if (ret) {
- hid_err(hdev, "can not set rmi mode\n");
+ hid_warn(hdev, "Failed to suspend device: %d\n", ret);
return ret;
}
- if (data->read_f11_ctrl_regs) {
- ret = rmi_write_block(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
- if (ret)
- hid_warn(hdev,
- "can not write F11 control registers after reset\n");
- }
-
- if (!device_may_wakeup(hdev->dev.parent)) {
- ret = rmi_set_sleep_mode(hdev, RMI_SLEEP_NORMAL);
- if (ret) {
- hid_err(hdev, "can not write sleep mode\n");
- return ret;
- }
- }
-
- return ret;
+ return 0;
}
static int rmi_post_resume(struct hid_device *hdev)
{
struct rmi_data *data = hid_get_drvdata(hdev);
+ struct rmi_device *rmi_dev = data->xport.rmi_dev;
+ int ret;
if (!(data->device_flags & RMI_DEVICE))
return 0;
- return rmi_reset_attn_mode(hdev);
-}
-#endif /* CONFIG_PM */
-
-#define RMI4_MAX_PAGE 0xff
-#define RMI4_PAGE_SIZE 0x0100
-
-#define PDT_START_SCAN_LOCATION 0x00e9
-#define PDT_END_SCAN_LOCATION 0x0005
-#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
-
-struct pdt_entry {
- u8 query_base_addr:8;
- u8 command_base_addr:8;
- u8 control_base_addr:8;
- u8 data_base_addr:8;
- u8 interrupt_source_count:3;
- u8 bits3and4:2;
- u8 function_version:2;
- u8 bit7:1;
- u8 function_number:8;
-} __attribute__((__packed__));
-
-static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count)
-{
- return GENMASK(irq_count + irq_base - 1, irq_base);
-}
-
-static void rmi_register_function(struct rmi_data *data,
- struct pdt_entry *pdt_entry, int page, unsigned interrupt_count)
-{
- struct rmi_function *f = NULL;
- u16 page_base = page << 8;
-
- switch (pdt_entry->function_number) {
- case 0x01:
- f = &data->f01;
- break;
- case 0x11:
- f = &data->f11;
- break;
- case 0x30:
- f = &data->f30;
- break;
- }
-
- if (f) {
- f->page = page;
- f->query_base_addr = page_base | pdt_entry->query_base_addr;
- f->command_base_addr = page_base | pdt_entry->command_base_addr;
- f->control_base_addr = page_base | pdt_entry->control_base_addr;
- f->data_base_addr = page_base | pdt_entry->data_base_addr;
- f->interrupt_base = interrupt_count;
- f->interrupt_count = pdt_entry->interrupt_source_count;
- f->irq_mask = rmi_gen_mask(f->interrupt_base,
- f->interrupt_count);
- data->interrupt_enable_mask |= f->irq_mask;
- }
-}
-
-static int rmi_scan_pdt(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- struct pdt_entry entry;
- int page;
- bool page_has_function;
- int i;
- int retval;
- int interrupt = 0;
- u16 page_start, pdt_start , pdt_end;
-
- hid_info(hdev, "Scanning PDT...\n");
-
- for (page = 0; (page <= RMI4_MAX_PAGE); page++) {
- page_start = RMI4_PAGE_SIZE * page;
- pdt_start = page_start + PDT_START_SCAN_LOCATION;
- pdt_end = page_start + PDT_END_SCAN_LOCATION;
-
- page_has_function = false;
- for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) {
- retval = rmi_read_block(hdev, i, &entry, sizeof(entry));
- if (retval) {
- hid_err(hdev,
- "Read of PDT entry at %#06x failed.\n",
- i);
- goto error_exit;
- }
-
- if (RMI4_END_OF_PDT(entry.function_number))
- break;
-
- page_has_function = true;
-
- hid_info(hdev, "Found F%02X on page %#04x\n",
- entry.function_number, page);
-
- rmi_register_function(data, &entry, page, interrupt);
- interrupt += entry.interrupt_source_count;
- }
-
- if (!page_has_function)
- break;
- }
-
- hid_info(hdev, "%s: Done with PDT scan.\n", __func__);
- retval = 0;
-
-error_exit:
- return retval;
-}
-
-#define RMI_DEVICE_F01_BASIC_QUERY_LEN 11
-
-static int rmi_populate_f01(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 basic_queries[RMI_DEVICE_F01_BASIC_QUERY_LEN];
- u8 info[3];
- int ret;
- bool has_query42;
- bool has_lts;
- bool has_sensor_id;
- bool has_ds4_queries = false;
- bool has_build_id_query = false;
- bool has_package_id_query = false;
- u16 query_offset = data->f01.query_base_addr;
- u16 prod_info_addr;
- u8 ds4_query_len;
-
- ret = rmi_read_block(hdev, query_offset, basic_queries,
- RMI_DEVICE_F01_BASIC_QUERY_LEN);
- if (ret) {
- hid_err(hdev, "Can not read basic queries from Function 0x1.\n");
- return ret;
- }
-
- has_lts = !!(basic_queries[0] & BIT(2));
- has_sensor_id = !!(basic_queries[1] & BIT(3));
- has_query42 = !!(basic_queries[1] & BIT(7));
-
- query_offset += 11;
- prod_info_addr = query_offset + 6;
- query_offset += 10;
-
- if (has_lts)
- query_offset += 20;
-
- if (has_sensor_id)
- query_offset++;
-
- if (has_query42) {
- ret = rmi_read(hdev, query_offset, info);
- if (ret) {
- hid_err(hdev, "Can not read query42.\n");
- return ret;
- }
- has_ds4_queries = !!(info[0] & BIT(0));
- query_offset++;
- }
-
- if (has_ds4_queries) {
- ret = rmi_read(hdev, query_offset, &ds4_query_len);
- if (ret) {
- hid_err(hdev, "Can not read DS4 Query length.\n");
- return ret;
- }
- query_offset++;
-
- if (ds4_query_len > 0) {
- ret = rmi_read(hdev, query_offset, info);
- if (ret) {
- hid_err(hdev, "Can not read DS4 query.\n");
- return ret;
- }
-
- has_package_id_query = !!(info[0] & BIT(0));
- has_build_id_query = !!(info[0] & BIT(1));
- }
- }
-
- if (has_package_id_query)
- prod_info_addr++;
-
- if (has_build_id_query) {
- ret = rmi_read_block(hdev, prod_info_addr, info, 3);
- if (ret) {
- hid_err(hdev, "Can not read product info.\n");
- return ret;
- }
-
- data->firmware_id = info[1] << 8 | info[0];
- data->firmware_id += info[2] * 65536;
- }
-
- ret = rmi_read_block(hdev, data->f01.control_base_addr, info,
- 2);
-
- if (ret) {
- hid_err(hdev, "can not read f01 ctrl registers\n");
- return ret;
- }
-
- data->f01_ctrl0 = info[0];
-
- if (!info[1]) {
- /*
- * Do to a firmware bug in some touchpads the F01 interrupt
- * enable control register will be cleared on reset.
- * This will stop the touchpad from reporting data, so
- * if F01 CTRL1 is 0 then we need to explicitly enable
- * interrupts for the functions we want data for.
- */
- data->restore_interrupt_mask = true;
-
- ret = rmi_write(hdev, data->f01.control_base_addr + 1,
- &data->interrupt_enable_mask);
- if (ret) {
- hid_err(hdev, "can not write to control reg 1: %d.\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int rmi_populate_f11(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 buf[20];
- int ret;
- bool has_query9;
- bool has_query10 = false;
- bool has_query11;
- bool has_query12;
- bool has_query27;
- bool has_query28;
- bool has_query36 = false;
- bool has_physical_props;
- bool has_gestures;
- bool has_rel;
- bool has_data40 = false;
- bool has_dribble = false;
- bool has_palm_detect = false;
- unsigned x_size, y_size;
- u16 query_offset;
-
- if (!data->f11.query_base_addr) {
- hid_err(hdev, "No 2D sensor found, giving up.\n");
- return -ENODEV;
- }
-
- /* query 0 contains some useful information */
- ret = rmi_read(hdev, data->f11.query_base_addr, buf);
- if (ret) {
- hid_err(hdev, "can not get query 0: %d.\n", ret);
- return ret;
- }
- has_query9 = !!(buf[0] & BIT(3));
- has_query11 = !!(buf[0] & BIT(4));
- has_query12 = !!(buf[0] & BIT(5));
- has_query27 = !!(buf[0] & BIT(6));
- has_query28 = !!(buf[0] & BIT(7));
-
- /* query 1 to get the max number of fingers */
- ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
- if (ret) {
- hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret);
- return ret;
- }
- data->max_fingers = (buf[0] & 0x07) + 1;
- if (data->max_fingers > 5)
- data->max_fingers = 10;
-
- data->f11.report_size = data->max_fingers * 5 +
- DIV_ROUND_UP(data->max_fingers, 4);
-
- if (!(buf[0] & BIT(4))) {
- hid_err(hdev, "No absolute events, giving up.\n");
- return -ENODEV;
- }
-
- has_rel = !!(buf[0] & BIT(3));
- has_gestures = !!(buf[0] & BIT(5));
-
- ret = rmi_read(hdev, data->f11.query_base_addr + 5, buf);
- if (ret) {
- hid_err(hdev, "can not get absolute data sources: %d.\n", ret);
+ ret = rmi_reset_attn_mode(hdev);
+ if (ret)
return ret;
- }
-
- has_dribble = !!(buf[0] & BIT(4));
-
- /*
- * At least 4 queries are guaranteed to be present in F11
- * +1 for query 5 which is present since absolute events are
- * reported and +1 for query 12.
- */
- query_offset = 6;
-
- if (has_rel)
- ++query_offset; /* query 6 is present */
-
- if (has_gestures) {
- /* query 8 to find out if query 10 exists */
- ret = rmi_read(hdev,
- data->f11.query_base_addr + query_offset + 1, buf);
- if (ret) {
- hid_err(hdev, "can not read gesture information: %d.\n",
- ret);
- return ret;
- }
- has_palm_detect = !!(buf[0] & BIT(0));
- has_query10 = !!(buf[0] & BIT(2));
-
- query_offset += 2; /* query 7 and 8 are present */
- }
-
- if (has_query9)
- ++query_offset;
-
- if (has_query10)
- ++query_offset;
-
- if (has_query11)
- ++query_offset;
-
- /* query 12 to know if the physical properties are reported */
- if (has_query12) {
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 12: %d.\n", ret);
- return ret;
- }
- has_physical_props = !!(buf[0] & BIT(5));
-
- if (has_physical_props) {
- query_offset += 1;
- ret = rmi_read_block(hdev,
- data->f11.query_base_addr
- + query_offset, buf, 4);
- if (ret) {
- hid_err(hdev, "can not read query 15-18: %d.\n",
- ret);
- return ret;
- }
-
- x_size = buf[0] | (buf[1] << 8);
- y_size = buf[2] | (buf[3] << 8);
-
- data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10);
- data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10);
-
- hid_info(hdev, "%s: size in mm: %d x %d\n",
- __func__, data->x_size_mm, data->y_size_mm);
-
- /*
- * query 15 - 18 contain the size of the sensor
- * and query 19 - 26 contain bezel dimensions
- */
- query_offset += 12;
- }
- }
-
- if (has_query27)
- ++query_offset;
- if (has_query28) {
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 28: %d.\n", ret);
- return ret;
- }
-
- has_query36 = !!(buf[0] & BIT(6));
- }
-
- if (has_query36) {
- query_offset += 2;
- ret = rmi_read(hdev, data->f11.query_base_addr
- + query_offset, buf);
- if (ret) {
- hid_err(hdev, "can not get query 36: %d.\n", ret);
- return ret;
- }
-
- has_data40 = !!(buf[0] & BIT(5));
- }
-
-
- if (has_data40)
- data->f11.report_size += data->max_fingers * 2;
-
- ret = rmi_read_block(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
+ ret = rmi_driver_resume(rmi_dev, false);
if (ret) {
- hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
+ hid_warn(hdev, "Failed to resume device: %d\n", ret);
return ret;
}
- /* data->f11_ctrl_regs now contains valid register data */
- data->read_f11_ctrl_regs = true;
-
- data->max_x = data->f11_ctrl_regs[6] | (data->f11_ctrl_regs[7] << 8);
- data->max_y = data->f11_ctrl_regs[8] | (data->f11_ctrl_regs[9] << 8);
-
- if (has_dribble) {
- data->f11_ctrl_regs[0] = data->f11_ctrl_regs[0] & ~BIT(6);
- ret = rmi_write(hdev, data->f11.control_base_addr,
- data->f11_ctrl_regs);
- if (ret) {
- hid_err(hdev, "can not write to control reg 0: %d.\n",
- ret);
- return ret;
- }
- }
-
- if (has_palm_detect) {
- data->f11_ctrl_regs[11] = data->f11_ctrl_regs[11] & ~BIT(0);
- ret = rmi_write(hdev, data->f11.control_base_addr + 11,
- &data->f11_ctrl_regs[11]);
- if (ret) {
- hid_err(hdev, "can not write to control reg 11: %d.\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int rmi_populate_f30(struct hid_device *hdev)
-{
- struct rmi_data *data = hid_get_drvdata(hdev);
- u8 buf[20];
- int ret;
- bool has_gpio, has_led;
- unsigned bytes_per_ctrl;
- u8 ctrl2_addr;
- int ctrl2_3_length;
- int i;
-
- /* function F30 is for physical buttons */
- if (!data->f30.query_base_addr) {
- hid_err(hdev, "No GPIO/LEDs found, giving up.\n");
- return -ENODEV;
- }
-
- ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2);
- if (ret) {
- hid_err(hdev, "can not get F30 query registers: %d.\n", ret);
- return ret;
- }
-
- has_gpio = !!(buf[0] & BIT(3));
- has_led = !!(buf[0] & BIT(2));
- data->gpio_led_count = buf[1] & 0x1f;
-
- /* retrieve ctrl 2 & 3 registers */
- bytes_per_ctrl = (data->gpio_led_count + 7) / 8;
- /* Ctrl0 is present only if both has_gpio and has_led are set*/
- ctrl2_addr = (has_gpio && has_led) ? bytes_per_ctrl : 0;
- /* Ctrl1 is always be present */
- ctrl2_addr += bytes_per_ctrl;
- ctrl2_3_length = 2 * bytes_per_ctrl;
-
- data->f30.report_size = bytes_per_ctrl;
-
- ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr,
- buf, ctrl2_3_length);
- if (ret) {
- hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n",
- ctrl2_3_length, ret);
- return ret;
- }
-
- for (i = 0; i < data->gpio_led_count; i++) {
- int byte_position = i >> 3;
- int bit_position = i & 0x07;
- u8 dir_byte = buf[byte_position];
- u8 data_byte = buf[byte_position + bytes_per_ctrl];
- bool dir = (dir_byte >> bit_position) & BIT(0);
- bool dat = (data_byte >> bit_position) & BIT(0);
-
- if (dir == 0) {
- /* input mode */
- if (dat) {
- /* actual buttons have pull up resistor */
- data->button_count++;
- set_bit(i, &data->button_mask);
- set_bit(i, &data->button_state_mask);
- }
- }
-
- }
-
return 0;
}
+#endif /* CONFIG_PM */
-static int rmi_populate(struct hid_device *hdev)
+static int rmi_hid_reset(struct rmi_transport_dev *xport, u16 reset_addr)
{
- struct rmi_data *data = hid_get_drvdata(hdev);
- int ret;
-
- ret = rmi_scan_pdt(hdev);
- if (ret) {
- hid_err(hdev, "PDT scan failed with code %d.\n", ret);
- return ret;
- }
-
- ret = rmi_populate_f01(hdev);
- if (ret) {
- hid_err(hdev, "Error while initializing F01 (%d).\n", ret);
- return ret;
- }
-
- ret = rmi_populate_f11(hdev);
- if (ret) {
- hid_err(hdev, "Error while initializing F11 (%d).\n", ret);
- return ret;
- }
-
- if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) {
- ret = rmi_populate_f30(hdev);
- if (ret)
- hid_warn(hdev, "Error while initializing F30 (%d).\n", ret);
- }
+ struct rmi_data *data = container_of(xport, struct rmi_data, xport);
+ struct hid_device *hdev = data->hdev;
- return 0;
+ return rmi_reset_attn_mode(hdev);
}
static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct rmi_data *data = hid_get_drvdata(hdev);
struct input_dev *input = hi->input;
- int ret;
- int res_x, res_y, i;
+ int ret = 0;
+
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
- data->input = input;
+ data->xport.input = input;
hid_dbg(hdev, "Opening low level driver\n");
ret = hid_hw_open(hdev);
if (ret)
return ret;
- if (!(data->device_flags & RMI_DEVICE))
- return 0;
-
/* Allow incoming hid reports */
hid_device_io_start(hdev);
@@ -1222,40 +489,10 @@ static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
goto exit;
}
- ret = rmi_populate(hdev);
- if (ret)
- goto exit;
-
- hid_info(hdev, "firmware id: %ld\n", data->firmware_id);
-
- __set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0);
-
- if (data->x_size_mm && data->y_size_mm) {
- res_x = (data->max_x - 1) / data->x_size_mm;
- res_y = (data->max_y - 1) / data->y_size_mm;
-
- input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
- input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
- }
-
- input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
-
- ret = input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
- if (ret < 0)
+ ret = rmi_register_transport_device(&data->xport);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to register transport driver\n");
goto exit;
-
- if (data->button_count) {
- __set_bit(EV_KEY, input->evbit);
- for (i = 0; i < data->button_count; i++)
- __set_bit(BTN_LEFT + i, input->keybit);
-
- if (data->button_count == 1)
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
set_bit(RMI_STARTED, &data->flags);
@@ -1304,6 +541,71 @@ static int rmi_check_valid_report_id(struct hid_device *hdev, unsigned type,
return 0;
}
+static struct rmi_device_platform_data rmi_hid_pdata = {
+ .sensor_pdata = {
+ .sensor_type = rmi_sensor_touchpad,
+ .axis_align.flip_y = true,
+ .dribble = RMI_REG_STATE_ON,
+ .palm_detect = RMI_REG_STATE_OFF,
+ },
+};
+
+static const struct rmi_transport_ops hid_rmi_ops = {
+ .write_block = rmi_hid_write_block,
+ .read_block = rmi_hid_read_block,
+ .reset = rmi_hid_reset,
+};
+
+static void rmi_irq_teardown(void *data)
+{
+ struct rmi_data *hdata = data;
+ struct irq_domain *domain = hdata->domain;
+
+ if (!domain)
+ return;
+
+ irq_dispose_mapping(irq_find_mapping(domain, 0));
+
+ irq_domain_remove(domain);
+ hdata->domain = NULL;
+ hdata->rmi_irq = 0;
+}
+
+static int rmi_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rmi_irq_ops = {
+ .map = rmi_irq_map,
+};
+
+static int rmi_setup_irq_domain(struct hid_device *hdev)
+{
+ struct rmi_data *hdata = hid_get_drvdata(hdev);
+ int ret;
+
+ hdata->domain = irq_domain_create_linear(hdev->dev.fwnode, 1,
+ &rmi_irq_ops, hdata);
+ if (!hdata->domain)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(&hdev->dev, &rmi_irq_teardown, hdata);
+ if (ret)
+ return ret;
+
+ hdata->rmi_irq = irq_create_mapping(hdata->domain, 0);
+ if (hdata->rmi_irq <= 0) {
+ hid_err(hdev, "Can't allocate an IRQ\n");
+ return hdata->rmi_irq < 0 ? hdata->rmi_irq : -ENXIO;
+ }
+
+ return 0;
+}
+
static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct rmi_data *data = NULL;
@@ -1365,8 +667,8 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL);
if (!data->writeReport) {
- ret = -ENOMEM;
- return ret;
+ hid_err(hdev, "failed to allocate buffer for HID reports\n");
+ return -ENOMEM;
}
data->readReport = data->writeReport + data->output_report_size;
@@ -1375,6 +677,21 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&data->page_mutex);
+ ret = rmi_setup_irq_domain(hdev);
+ if (ret) {
+ hid_err(hdev, "failed to allocate IRQ domain\n");
+ return ret;
+ }
+
+ if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)
+ rmi_hid_pdata.f30_data.disable = true;
+
+ data->xport.dev = hdev->dev.parent;
+ data->xport.pdata = rmi_hid_pdata;
+ data->xport.pdata.irq = data->rmi_irq;
+ data->xport.proto_name = "hid";
+ data->xport.ops = &hid_rmi_ops;
+
start:
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
@@ -1382,17 +699,6 @@ start:
return ret;
}
- if ((data->device_flags & RMI_DEVICE) &&
- !test_bit(RMI_STARTED, &data->flags))
- /*
- * The device maybe in the bootloader if rmi_input_configured
- * failed to find F11 in the PDT. Print an error, but don't
- * return an error from rmi_probe so that hidraw will be
- * accessible from userspace. That way a userspace tool
- * can be used to reload working firmware on the touchpad.
- */
- hid_err(hdev, "Device failed to be properly configured\n");
-
return 0;
}
@@ -1401,6 +707,8 @@ static void rmi_remove(struct hid_device *hdev)
struct rmi_data *hdata = hid_get_drvdata(hdev);
clear_bit(RMI_STARTED, &hdata->flags);
+ cancel_work_sync(&hdata->reset_work);
+ rmi_unregister_transport_device(&hdata->xport);
hid_hw_stop(hdev);
}
@@ -1408,6 +716,7 @@ static void rmi_remove(struct hid_device *hdev)
static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
@@ -1425,7 +734,7 @@ static struct hid_driver rmi_driver = {
#ifdef CONFIG_PM
.suspend = rmi_suspend,
.resume = rmi_post_resume,
- .reset_resume = rmi_post_reset,
+ .reset_resume = rmi_post_resume,
#endif
};
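
Note: the rewritten hid-rmi no longer parses F01/F11/F30 itself; it registers as an RMI4 transport and forwards attention data to the RMI core by firing a software interrupt. That interrupt comes from a one-entry linear IRQ domain backed by dummy_irq_chip, as in the sketch below (illustrative only; names other than the irqdomain calls are assumed):

/* Illustrative sketch: a single-hwirq linear domain for in-driver events. */
struct example_data {
        struct irq_domain *domain;
        unsigned int virq;
};

static int example_irq_map(struct irq_domain *h, unsigned int virq,
                           irq_hw_number_t hw_irq_num)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops example_irq_ops = {
        .map = example_irq_map,
};

static int example_setup_irq(struct device *dev, struct example_data *d)
{
        d->domain = irq_domain_create_linear(dev->fwnode, 1,
                                             &example_irq_ops, d);
        if (!d->domain)
                return -ENOMEM;

        d->virq = irq_create_mapping(d->domain, 0);     /* hwirq 0 -> Linux irq */
        if (!d->virq)
                return -ENOMEM;

        return 0;
}

/* In the report path the transport then only has to call
 *      generic_handle_irq(d->virq);
 * and whatever handler was requested on that irq does the actual work.
 */
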
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
index ab68afcba2a2..a5897b9c0956 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
@@ -111,6 +111,14 @@
#define IPC_ILUP_BIT (1<<IPC_ILUP_OFFS)
/*
+ * ISH FW status bits in ISH FW Status Register
+ */
+#define IPC_ISH_FWSTS_SHIFT 12
+#define IPC_ISH_FWSTS_MASK GENMASK(15, 12)
+#define IPC_GET_ISH_FWSTS(status) \
+ (((status) & IPC_ISH_FWSTS_MASK) >> IPC_ISH_FWSTS_SHIFT)
+
+/*
* FW status bits (relevant)
*/
#define IPC_FWSTS_ILUP 0x1
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 46615a03e78f..fd34307a7a70 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -61,6 +61,18 @@ struct ish_hw {
void __iomem *mem_addr;
};
+/*
+ * ISH FW status type
+ */
+enum {
+ FWSTS_AFTER_RESET = 0,
+ FWSTS_WAIT_FOR_HOST = 4,
+ FWSTS_START_KERNEL_DMA = 5,
+ FWSTS_FW_IS_RUNNING = 7,
+ FWSTS_SENSOR_APP_LOADED = 8,
+ FWSTS_SENSOR_APP_RUNNING = 15
+};
+
#define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
irqreturn_t ish_irq_handler(int irq, void *dev_id);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d647d2dd2c..8df81dc84529 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -24,7 +24,6 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <linux/miscdevice.h>
#define CREATE_TRACE_POINTS
#include <trace/events/intel_ish.h>
#include "ishtp-dev.h"
@@ -47,7 +46,8 @@ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
*
* Callback to direct log messages to Linux trace buffers
*/
-static void ish_event_tracer(struct ishtp_device *dev, char *format, ...)
+static __printf(2, 3)
+void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
{
if (trace_ishtp_dump_enabled()) {
va_list args;
@@ -205,12 +205,15 @@ static void ish_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM
static struct device *ish_resume_device;
+/* 50ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 50
+
/**
* ish_resume_handler() - Work function to complete resume
* @work: work struct
*
* The resume work function to complete resume function asynchronously.
- * There are two types of platforms, one where ISH is not powered off,
+ * There are two resume paths, one where ISH is not powered off,
* in that case a simple resume message is enough, others we need
* a reset sequence.
*/
@@ -218,20 +221,31 @@ static void ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts;
int ret;
- ishtp_send_resume(dev);
+ /* Get ISH FW status */
+ fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev));
- /* 50 ms to get resume response */
- if (dev->resume_flag)
- ret = wait_event_interruptible_timeout(dev->resume_wait,
- !dev->resume_flag,
- msecs_to_jiffies(50));
+ /*
+ * If currently, in ISH FW, sensor app is loaded or beyond that,
+ * it means ISH isn't powered off, in this case, send a resume message.
+ */
+ if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
+ ishtp_send_resume(dev);
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ ret = wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+ }
/*
- * If no resume response. This platform is not S0ix compatible
- * So on resume full reboot of ISH processor will happen, so
- * need to go through init sequence again
+ * If in ISH FW, sensor app isn't loaded yet, or no resume response.
+ * That means this platform is not S0ix compatible, or something is
+ * wrong with ISH FW. So on resume, full reboot of ISH processor will
+ * happen, so need to go through init sequence again.
*/
if (dev->resume_flag)
ish_init(dev);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 277983aa1d90..cd23903ddcf1 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -208,7 +208,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
hid->version = le16_to_cpu(ISH_HID_VERSION);
hid->vendor = le16_to_cpu(ISH_HID_VENDOR);
hid->product = le16_to_cpu(ISH_HID_PRODUCT);
- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", "hid-ishtp",
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-ishtp",
hid->vendor, hid->product);
rv = hid_add_device(hid);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f4cbc744e657..5f382fedc2ab 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -358,7 +358,7 @@ static void ishtp_cl_dev_release(struct device *dev)
kfree(to_ishtp_cl_device(dev));
}
-static struct device_type ishtp_cl_device_type = {
+static const struct device_type ishtp_cl_device_type = {
.release = ishtp_cl_dev_release,
};
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 59460b66e689..b7213608ce43 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index ac364418e17c..d27e03526acd 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/miscdevice.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index a94f9a8a96a0..6a6d927b78b0 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -238,7 +238,8 @@ struct ishtp_device {
uint64_t ishtp_host_dma_rx_buf_phys;
/* Dump to trace buffers if enabled*/
- void (*print_log)(struct ishtp_device *dev, char *format, ...);
+ __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
+ const char *format, ...);
/* Debug stats */
unsigned int ipc_rx_cnt;
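
Note: the ishtp-dev.h hunk above adds a __printf(2, 3) annotation to the print_log callback, so the compiler checks every call's format string against its arguments. A minimal sketch of the same idea (illustrative only; the struct is assumed):

/* Illustrative sketch: format checking on an indirect call. */
struct example_log {
        __printf(2, 3)
        void (*log)(struct example_log *l, const char *fmt, ...);
};

/* A mismatched call such as l->log(l, "%d", "text") now triggers a
 * -Wformat warning at compile time instead of corrupting the output.
 */
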
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 333108ef18cf..961bc6fdd2d9 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -43,7 +43,6 @@
*/
#define DRIVER_DESC "USB HID core driver"
-#define DRIVER_LICENSE "GPL"
/*
* Module parameters.
@@ -1660,4 +1659,4 @@ MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 30a2977e2645..d6847a664446 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,7 +85,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -103,12 +103,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
@@ -297,7 +291,7 @@ static void usbhid_remove_all_dquirks(void)
}
-/**
+/**
* usbhid_quirks_init: apply USB HID quirks specified at module load time
*/
int usbhid_quirks_init(char **quirks_param)
@@ -361,7 +355,7 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
if (bl_entry != NULL)
dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
+ bl_entry->quirks, bl_entry->idVendor,
bl_entry->idProduct);
return bl_entry;
}
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 9a332e683db7..7fb2d1e4f5dd 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -39,11 +39,10 @@
#define DRIVER_VERSION ""
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB HID Boot Protocol keyboard driver"
-#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
static const unsigned char usb_kbd_keycode[256] = {
0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index bf16d72dc370..dd911c5241d8 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -42,11 +42,10 @@
#define DRIVER_VERSION "v1.6"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB HID Boot Protocol mouse driver"
-#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
struct usb_mouse {
char name[128];
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index d303e413306d..38ee2125412f 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -102,7 +102,6 @@
#define DRIVER_VERSION "v2.00"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB Wacom tablet driver"
-#define DRIVER_LICENSE "GPL"
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_VENDOR_ID_LENOVO 0x17ef
@@ -166,7 +165,9 @@ struct wacom {
struct work_struct wireless_work;
struct work_struct battery_work;
struct work_struct remote_work;
+ struct delayed_work init_work;
struct wacom_remote *remote;
+ bool generic_has_leds;
struct wacom_leds {
struct wacom_group_leds *groups;
unsigned int count;
@@ -218,4 +219,6 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led);
struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
unsigned int id);
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
+int wacom_equivalent_usage(int usage);
+int wacom_initialize_leds(struct wacom *wacom);
#endif
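
Note: wacom.h above adds a delayed_work item (init_work); the wacom_sys.c changes further down use it so that wacom_query_tablet_data() merely schedules the mode switch and LED setup about a second later instead of doing them inline. A minimal sketch of that deferral pattern (illustrative only; every name here is assumed):

/* Illustrative sketch: defer slow hardware setup with delayed work. */
struct example_tablet {
        struct delayed_work init_work;
};

static void example_init_work(struct work_struct *work)
{
        struct example_tablet *t =
                container_of(work, struct example_tablet, init_work.work);

        /* talk to the hardware once it has had time to settle */
        (void)t;
}

/* probe():   INIT_DELAYED_WORK(&t->init_work, example_init_work);
 * trigger:   schedule_delayed_work(&t->init_work, msecs_to_jiffies(1000));
 * remove():  cancel_delayed_work_sync(&t->init_work);
 */
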
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8aeca038cc73..be8f7e2a026f 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -16,15 +16,7 @@
#include <linux/input/mt.h>
#define WAC_MSG_RETRIES 5
-
-#define WAC_CMD_WL_LED_CONTROL 0x03
-#define WAC_CMD_LED_CONTROL 0x20
-#define WAC_CMD_ICON_START 0x21
-#define WAC_CMD_ICON_XFER 0x23
-#define WAC_CMD_ICON_BT_XFER 0x26
#define WAC_CMD_RETRIES 10
-#define WAC_CMD_DELETE_PAIRING 0x20
-#define WAC_CMD_UNPAIR_ALL 0xFF
#define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
#define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
@@ -120,11 +112,12 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
u8 *data;
int ret;
int n;
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
if (!features->touch_max) {
@@ -333,8 +326,14 @@ static void wacom_post_parse_hid(struct hid_device *hdev,
if (features->type == HID_GENERIC) {
/* Any last-minute generic device setup */
if (features->touch_max > 1) {
- input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max,
- INPUT_MT_DIRECT);
+ if (features->device_type & WACOM_DEVICETYPE_DIRECT)
+ input_mt_init_slots(wacom_wac->touch_input,
+ wacom_wac->features.touch_max,
+ INPUT_MT_DIRECT);
+ else
+ input_mt_init_slots(wacom_wac->touch_input,
+ wacom_wac->features.touch_max,
+ INPUT_MT_POINTER);
}
}
}
@@ -497,11 +496,11 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
* from the tablet, it is necessary to switch the tablet out of this
* mode and into one which sends the full range of tablet data.
*/
-static int wacom_query_tablet_data(struct hid_device *hdev,
- struct wacom_features *features)
+static int _wacom_query_tablet_data(struct wacom *wacom)
{
- struct wacom *wacom = hid_get_drvdata(hdev);
+ struct hid_device *hdev = wacom->hdev;
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
@@ -757,9 +756,6 @@ static int wacom_led_control(struct wacom *wacom)
unsigned char report_id = WAC_CMD_LED_CONTROL;
int buf_size = 9;
- if (!hid_get_drvdata(wacom->hdev))
- return -ENODEV;
-
if (!wacom->led.groups)
return -ENOTSUPP;
@@ -767,12 +763,21 @@ static int wacom_led_control(struct wacom *wacom)
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
}
+ else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+ report_id = WAC_CMD_WL_INTUOSP2;
+ buf_size = 51;
+ }
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type >= INTUOS5S &&
- wacom->wacom_wac.features.type <= INTUOSPL) {
+ if (wacom->wacom_wac.features.type == HID_GENERIC) {
+ buf[0] = WAC_CMD_LED_CONTROL_GENERIC;
+ buf[1] = wacom->led.llv;
+ buf[2] = wacom->led.groups[0].select & 0x03;
+
+ } else if ((wacom->wacom_wac.features.type >= INTUOS5S &&
+ wacom->wacom_wac.features.type <= INTUOSPL)) {
/*
* Touch Ring and crop mark LED luminance may take on
* one of four values:
@@ -792,6 +797,16 @@ static int wacom_led_control(struct wacom *wacom)
} else
buf[1] = led_bits;
}
+ else if (wacom->wacom_wac.features.type == INTUOSP2_BT) {
+ buf[0] = report_id;
+ buf[4] = 100; // Power Connection LED (ORANGE)
+ buf[5] = 100; // BT Connection LED (BLUE)
+ buf[6] = 100; // Paper Mode (RED?)
+ buf[7] = 100; // Paper Mode (GREEN?)
+ buf[8] = 100; // Paper Mode (BLUE?)
+ buf[9] = wacom->led.llv;
+ buf[10] = wacom->led.groups[0].select & 0x03;
+ }
else {
int led = wacom->led.groups[0].select | 0x4;
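
After this hunk, wacom_led_control() hands the assembled buffer to the device as a HID feature report. A rough sketch of that last step using the stock hid_hw_raw_request() helper; the report id and three-byte layout here are placeholders, not the Wacom wire format:

#include <linux/hid.h>
#include <linux/slab.h>

/* Sketch: push an LED feature report to a HID device. */
static int example_set_led_report(struct hid_device *hdev, u8 report_id,
                                  u8 luminance, u8 selected)
{
        u8 *buf;
        int ret;

        buf = kzalloc(3, GFP_KERNEL);   /* raw_request needs a kmalloc'd buffer */
        if (!buf)
                return -ENOMEM;

        buf[0] = report_id;             /* numbered report: id goes first */
        buf[1] = luminance;
        buf[2] = selected & 0x03;

        ret = hid_hw_raw_request(hdev, report_id, buf, 3,
                                 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
        kfree(buf);
        return ret < 0 ? ret : 0;
}
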
@@ -1032,6 +1047,17 @@ static struct attribute_group intuos5_led_attr_group = {
.attrs = intuos5_led_attrs,
};
+static struct attribute *generic_led_attrs[] = {
+ &dev_attr_status0_luminance.attr,
+ &dev_attr_status_led0_select.attr,
+ NULL
+};
+
+static struct attribute_group generic_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = generic_led_attrs,
+};
+
struct wacom_sysfs_group_devres {
struct attribute_group *group;
struct kobject *root;
@@ -1353,7 +1379,7 @@ static int wacom_leds_alloc_and_register(struct wacom *wacom, int group_count,
return 0;
}
-static int wacom_initialize_leds(struct wacom *wacom)
+int wacom_initialize_leds(struct wacom *wacom)
{
int error;
@@ -1362,6 +1388,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
/* Initialize default values */
switch (wacom->wacom_wac.features.type) {
+ case HID_GENERIC:
+ if (!wacom->generic_has_leds)
+ return 0;
+ wacom->led.llv = 100;
+ wacom->led.max_llv = 100;
+
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+
+ error = wacom_devm_sysfs_create_group(wacom,
+ &generic_led_attr_group);
+ break;
+
case INTUOS4S:
case INTUOS4:
case INTUOS4WL:
@@ -1420,6 +1463,17 @@ static int wacom_initialize_leds(struct wacom *wacom)
&intuos5_led_attr_group);
break;
+ case INTUOSP2_BT:
+ wacom->led.llv = 50;
+ wacom->led.max_llv = 100;
+ error = wacom_leds_alloc_and_register(wacom, 1, 4, false);
+ if (error) {
+ hid_err(wacom->hdev,
+ "cannot create leds err: %d\n", error);
+ return error;
+ }
+ return 0;
+
case REMOTE:
wacom->led.llv = 255;
wacom->led.max_llv = 255;
@@ -1440,11 +1494,23 @@ static int wacom_initialize_leds(struct wacom *wacom)
"cannot create sysfs group err: %d\n", error);
return error;
}
- wacom_led_control(wacom);
return 0;
}
+static void wacom_init_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, init_work.work);
+
+ _wacom_query_tablet_data(wacom);
+ wacom_led_control(wacom);
+}
+
+static void wacom_query_tablet_data(struct wacom *wacom)
+{
+ schedule_delayed_work(&wacom->init_work, msecs_to_jiffies(1000));
+}
+
static enum power_supply_property wacom_battery_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PRESENT,
@@ -2020,6 +2086,24 @@ static void wacom_release_resources(struct wacom *wacom)
wacom->wacom_wac.pad_input = NULL;
}
+static void wacom_set_shared_values(struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) {
+ wacom_wac->shared->type = wacom_wac->features.type;
+ wacom_wac->shared->touch_input = wacom_wac->touch_input;
+ }
+
+ if (wacom_wac->has_mute_touch_switch)
+ wacom_wac->shared->has_mute_touch_switch = true;
+
+ if (wacom_wac->shared->has_mute_touch_switch &&
+ wacom_wac->shared->touch_input) {
+ set_bit(EV_SW, wacom_wac->shared->touch_input->evbit);
+ input_set_capability(wacom_wac->shared->touch_input, EV_SW,
+ SW_MUTE_DEVICE);
+ }
+}
+
static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
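
wacom_set_shared_values() above declares the mute switch on the shared touch input only when both the switch and a touch input exist. The underlying input API is small; a sketch with hypothetical helper names:

#include <linux/input.h>

/* Declare the capability once, at setup time. */
static void example_add_mute_switch(struct input_dev *touch)
{
        input_set_capability(touch, EV_SW, SW_MUTE_DEVICE);
}

/* Report state changes later, e.g. when a pad packet carries the flag. */
static void example_report_mute(struct input_dev *touch, bool muted)
{
        input_report_switch(touch, SW_MUTE_DEVICE, muted);
        input_sync(touch);
}
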
@@ -2118,7 +2202,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (!wireless) {
/* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(hdev, features);
+ wacom_query_tablet_data(wacom);
}
/* touch only Bamboo doesn't support pen */
@@ -2139,13 +2223,7 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
error = hid_hw_open(hdev);
- if ((wacom_wac->features.type == INTUOSHT ||
- wacom_wac->features.type == INTUOSHT2) &&
- (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)) {
- wacom_wac->shared->type = wacom_wac->features.type;
- wacom_wac->shared->touch_input = wacom_wac->touch_input;
- }
-
+ wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);
return 0;
@@ -2450,6 +2528,7 @@ static int wacom_probe(struct hid_device *hdev,
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
+ INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
@@ -2491,12 +2570,17 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
+ cancel_delayed_work_sync(&wacom->init_work);
cancel_work_sync(&wacom->wireless_work);
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
+ /* make sure we don't trigger the LEDs */
+ wacom_led_groups_release(wacom);
+ wacom_release_resources(wacom);
+
hid_set_drvdata(hdev, NULL);
}
@@ -2504,12 +2588,11 @@ static void wacom_remove(struct hid_device *hdev)
static int wacom_resume(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
- struct wacom_features *features = &wacom->wacom_wac.features;
mutex_lock(&wacom->lock);
/* switch to wacom mode first */
- wacom_query_tablet_data(hdev, features);
+ _wacom_query_tablet_data(wacom);
wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -2540,4 +2623,4 @@ module_hid_driver(wacom_driver);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 672145b0d8f5..4aa3de9f1163 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -43,6 +43,8 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
static int wacom_numbered_button_to_key(int n);
+static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
+ int group);
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -1192,6 +1194,166 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
return count;
}
+static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+{
+ const int pen_frame_len = 14;
+ const int pen_frames = 7;
+
+ struct input_dev *pen_input = wacom->pen_input;
+ unsigned char *data = wacom->data;
+ int i;
+
+ wacom->serial[0] = get_unaligned_le64(&data[99]);
+ wacom->id[0] = get_unaligned_le16(&data[107]);
+ if (wacom->serial[0] >> 52 == 1) {
+ /* Add back in missing bits of ID for non-USI pens */
+ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+ }
+ wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
+
+ for (i = 0; i < pen_frames; i++) {
+ unsigned char *frame = &data[i*pen_frame_len + 1];
+ bool valid = frame[0] & 0x80;
+ bool prox = frame[0] & 0x40;
+ bool range = frame[0] & 0x20;
+
+ if (!valid)
+ continue;
+
+ if (range) {
+ input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
+ input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+ input_report_abs(pen_input, ABS_TILT_X, frame[7]);
+ input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
+ input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
+ input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
+ }
+ input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
+
+ input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
+ input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+ input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+ input_report_key(pen_input, wacom->tool[0], prox);
+ input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+ input_report_abs(pen_input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+
+ wacom->shared->stylus_in_proximity = prox;
+
+ input_sync(pen_input);
+ }
+}
+
+static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
+{
+ const int finger_touch_len = 8;
+ const int finger_frames = 4;
+ const int finger_frame_len = 43;
+
+ struct input_dev *touch_input = wacom->touch_input;
+ unsigned char *data = wacom->data;
+ int num_contacts_left = 5;
+ int i, j;
+
+ for (i = 0; i < finger_frames; i++) {
+ unsigned char *frame = &data[i*finger_frame_len + 109];
+ int current_num_contacts = frame[0] & 0x7F;
+ int contacts_to_send;
+
+ if (!(frame[0] & 0x80))
+ continue;
+
+ /*
+	 * The first packet resets the counter, since only the first
+	 * packet in the series has a non-zero current_num_contacts.
+ */
+ if (current_num_contacts)
+ wacom->num_contacts_left = current_num_contacts;
+
+ contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
+
+ for (j = 0; j < contacts_to_send; j++) {
+ unsigned char *touch = &frame[j*finger_touch_len + 1];
+ int slot = input_mt_get_slot_by_key(touch_input, touch[0]);
+ int x = get_unaligned_le16(&touch[2]);
+ int y = get_unaligned_le16(&touch[4]);
+ int w = touch[6] * input_abs_get_res(touch_input, ABS_MT_POSITION_X);
+ int h = touch[7] * input_abs_get_res(touch_input, ABS_MT_POSITION_Y);
+
+ if (slot < 0)
+ continue;
+
+ input_mt_slot(touch_input, slot);
+ input_mt_report_slot_state(touch_input, MT_TOOL_FINGER, touch[1] & 0x01);
+ input_report_abs(touch_input, ABS_MT_POSITION_X, x);
+ input_report_abs(touch_input, ABS_MT_POSITION_Y, y);
+ input_report_abs(touch_input, ABS_MT_TOUCH_MAJOR, max(w, h));
+ input_report_abs(touch_input, ABS_MT_TOUCH_MINOR, min(w, h));
+ input_report_abs(touch_input, ABS_MT_ORIENTATION, w > h);
+ }
+
+ input_mt_sync_frame(touch_input);
+
+ wacom->num_contacts_left -= contacts_to_send;
+ if (wacom->num_contacts_left <= 0) {
+ wacom->num_contacts_left = 0;
+ wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+ }
+ }
+
+ input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+ input_sync(touch_input);
+}
+
+static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
+{
+ struct input_dev *pad_input = wacom->pad_input;
+ unsigned char *data = wacom->data;
+
+ int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+ int ring = data[285];
+ int prox = buttons | (ring & 0x80);
+
+ wacom_report_numbered_buttons(pad_input, 9, buttons);
+
+ input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
+
+ input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
+ input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
+ input_event(pad_input, EV_MSC, MSC_SERIAL, 0xffffffff);
+
+ input_sync(pad_input);
+}
+
+static void wacom_intuos_pro2_bt_battery(struct wacom_wac *wacom)
+{
+ unsigned char *data = wacom->data;
+
+ bool chg = data[284] & 0x80;
+ int battery_status = data[284] & 0x7F;
+
+ wacom_notify_battery(wacom, battery_status, chg, 1, chg);
+}
+
+static int wacom_intuos_pro2_bt_irq(struct wacom_wac *wacom, size_t len)
+{
+ unsigned char *data = wacom->data;
+
+ if (data[0] != 0x80) {
+ dev_dbg(wacom->pen_input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
+ return 0;
+ }
+
+ wacom_intuos_pro2_bt_pen(wacom);
+ wacom_intuos_pro2_bt_touch(wacom);
+ wacom_intuos_pro2_bt_pad(wacom);
+ wacom_intuos_pro2_bt_battery(wacom);
+ return 0;
+}
+
static int wacom_24hdt_irq(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->touch_input;
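
The Bluetooth report parsed above packs multi-byte little-endian fields at odd offsets, so the code extracts them with the get_unaligned_le*() helpers instead of pointer casts. The same technique over a made-up layout:

#include <asm/unaligned.h>
#include <linux/types.h>

struct example_fields {
        u16 x;
        u16 y;
        u64 serial;
};

/* buf points at a packet of at least 13 bytes; the offsets are made up. */
static void example_parse(const u8 *buf, struct example_fields *out)
{
        /* Safe at any alignment; byte order is fixed little-endian. */
        out->x      = get_unaligned_le16(&buf[1]);
        out->y      = get_unaligned_le16(&buf[3]);
        out->serial = get_unaligned_le64(&buf[5]);
}
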
@@ -1446,7 +1608,7 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
-static int wacom_equivalent_usage(int usage)
+int wacom_equivalent_usage(int usage)
{
if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
int subpage = (usage & 0xFF00) << 8;
@@ -1473,6 +1635,16 @@ static int wacom_equivalent_usage(int usage)
return subpage | subusage;
}
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMTOUCH) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = WACOM_HID_SP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
return usage;
}
@@ -1552,12 +1724,14 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom->generic_has_leds = true;
+ /* fall through */
case WACOM_HID_WD_BUTTONHOME:
case WACOM_HID_WD_BUTTONUP:
case WACOM_HID_WD_BUTTONDOWN:
case WACOM_HID_WD_BUTTONLEFT:
case WACOM_HID_WD_BUTTONRIGHT:
- case WACOM_HID_WD_BUTTONCENTER:
wacom_map_usage(input, usage, field, EV_KEY,
wacom_numbered_button_to_key(features->numbered_buttons),
0);
@@ -1565,7 +1739,17 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHONOFF:
- wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ /*
+ * This usage, which is used to mute touch events, comes
+ * from the pad packet, but is reported on the touch
+ * interface. Because the touch interface may not have
+ * been created yet, we cannot call wacom_map_usage(). In
+ * order to process this usage when we receive it, we set
+ * the usage type and code directly.
+ */
+ wacom_wac->has_mute_touch_switch = true;
+ usage->type = EV_SW;
+ usage->code = SW_MUTE_DEVICE;
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHSTRIP:
@@ -1580,6 +1764,10 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
}
switch (equivalent_usage & 0xfffffff0) {
@@ -1622,17 +1810,40 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct input_dev *input = wacom_wac->pad_input;
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int i;
+
+ /*
+ * Avoid reporting this event and setting inrange_state if this usage
+ * hasn't been mapped.
+ */
+ if (!usage->type)
+ return;
if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
- wacom_wac->hid_data.inrange_state |= value;
+ if (usage->hid != WACOM_HID_WD_TOUCHRING)
+ wacom_wac->hid_data.inrange_state |= value;
}
switch (equivalent_usage) {
case WACOM_HID_WD_TOUCHRINGSTATUS:
+ if (!value)
+ input_event(input, usage->type, usage->code, 0);
break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ if (wacom_wac->shared->touch_input) {
+ input_report_switch(wacom_wac->shared->touch_input,
+ SW_MUTE_DEVICE, !value);
+ input_sync(wacom_wac->shared->touch_input);
+ }
+ break;
+
+ case WACOM_HID_WD_BUTTONCENTER:
+ for (i = 0; i < wacom->led.count; i++)
+ wacom_update_led(wacom, features->numbered_buttons,
+ value, i);
+		/* fall through */
default:
- features->input_event_flag = true;
input_event(input, usage->type, usage->code, value);
break;
}
@@ -1670,20 +1881,15 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pad_input;
bool active = wacom_wac->hid_data.inrange_state != 0;
/* report prox for expresskey events */
if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
- features->input_event_flag = true;
input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
- }
-
- if (features->input_event_flag) {
- features->input_event_flag = false;
input_sync(input);
}
+
}
static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2058,8 +2264,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
for (j = 0; j < field->maxusage; j++) {
struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage =
+ wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
case HID_GD_Y:
case HID_DG_WIDTH:
@@ -2068,7 +2276,7 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
case HID_DG_INRANGE:
case HID_DG_INVERT:
case HID_DG_TIPSWITCH:
- hid_data->last_slot_field = usage->hid;
+ hid_data->last_slot_field = equivalent_usage;
break;
case HID_DG_CONTACTCOUNT:
hid_data->cc_report = report->id;
@@ -2123,8 +2331,8 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
- /* currently, only direct devices have proper hid report descriptors */
- features->device_type |= WACOM_DEVICETYPE_DIRECT;
+ if (WACOM_DIRECT_DEVICE(field))
+ features->device_type |= WACOM_DEVICETYPE_DIRECT;
if (WACOM_PAD_FIELD(field))
wacom_wac_pad_usage_mapping(hdev, field, usage);
@@ -2142,6 +2350,9 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
if (wacom->wacom_wac.features.type != HID_GENERIC)
return;
+ if (value > field->logical_maximum || value < field->logical_minimum)
+ return;
+
if (WACOM_PAD_FIELD(field)) {
wacom_wac_pad_battery_event(hdev, field, usage, value);
if (wacom->wacom_wac.pad_input)
@@ -2669,6 +2880,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_intuos_irq(wacom_wac);
break;
+ case INTUOSP2_BT:
+ sync = wacom_intuos_pro2_bt_irq(wacom_wac, len);
+ break;
+
case TABLETPC:
case TABLETPCE:
case TABLETPC2FG:
@@ -2779,8 +2994,6 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
- if (features->numbered_buttons > 0)
- features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2840,6 +3053,13 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type = WACOM_DEVICETYPE_PAD;
+ if (features->type == INTUOSP2_BT) {
+ features->device_type |= WACOM_DEVICETYPE_PEN |
+ WACOM_DEVICETYPE_PAD |
+ WACOM_DEVICETYPE_TOUCH;
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ }
+
switch (features->type) {
case PL:
case DTU:
@@ -2986,6 +3206,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
+ case INTUOSP2_BT:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
features->distance_fuzz, 0);
@@ -3094,6 +3315,27 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
}
switch (features->type) {
+ case INTUOSP2_BT:
+ input_dev->evbit[0] |= BIT_MASK(EV_SW);
+ __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
+
+ if (wacom_wac->shared->touch->product == 0x361) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, 12440, 4, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, 8640, 4, 0);
+ }
+ else if (wacom_wac->shared->touch->product == 0x360) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, 8960, 4, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, 5920, 4, 0);
+ }
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+		input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
+
+ /* fall through */
+
case INTUOS5:
case INTUOS5L:
case INTUOSPM:
@@ -3290,6 +3532,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
+ if ((features->type == HID_GENERIC) && features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
if (!(features->device_type & WACOM_DEVICETYPE_PAD))
return -ENODEV;
@@ -3391,6 +3636,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOSPL:
case INTUOS5S:
case INTUOSPS:
+ case INTUOSP2_BT:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
@@ -3949,6 +4195,12 @@ static const struct wacom_features wacom_features_0x343 =
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x360 =
+ { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
+ INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x361 =
+ { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
+ INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4115,6 +4367,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
{ USB_DEVICE_WACOM(0x343) },
+ { BT_DEVICE_WACOM(0x360) },
+ { BT_DEVICE_WACOM(0x361) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
@@ -4123,6 +4377,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ I2C_DEVICE_WACOM(HID_ANY_ID) },
+ { BT_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_ids);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index fb0e50acb10d..857ccee16f38 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -12,8 +12,8 @@
#include <linux/types.h>
#include <linux/hid.h>
-/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 192
+/* maximum packet length for USB/BT devices */
+#define WACOM_PKGLEN_MAX 361
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
@@ -72,6 +72,17 @@
#define WACOM_REPORT_REMOTE 17
#define WACOM_REPORT_INTUOSHT2_ID 8
+/* wacom command report ids */
+#define WAC_CMD_WL_LED_CONTROL 0x03
+#define WAC_CMD_LED_CONTROL 0x20
+#define WAC_CMD_ICON_START 0x21
+#define WAC_CMD_ICON_XFER 0x23
+#define WAC_CMD_ICON_BT_XFER 0x26
+#define WAC_CMD_DELETE_PAIRING 0x20
+#define WAC_CMD_LED_CONTROL_GENERIC 0x32
+#define WAC_CMD_UNPAIR_ALL 0xFF
+#define WAC_CMD_WL_INTUOSP2 0x82
+
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
@@ -91,6 +102,7 @@
#define WACOM_HID_SP_DIGITIZER 0x000d0000
#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
@@ -104,6 +116,7 @@
#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
@@ -113,7 +126,6 @@
#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
-#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
@@ -127,6 +139,12 @@
#define WACOM_HID_UP_G11 0xff110000
#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+#define WACOM_HID_UP_WACOMTOUCH 0xff000000
+#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
+#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
+#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
+#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
@@ -144,7 +162,14 @@
((f)->physical == HID_DG_FINGER) || \
((f)->application == HID_DG_TOUCHSCREEN) || \
((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
- ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_WT_TOUCHPAD) || \
+ ((f)->application == HID_DG_TOUCHPAD))
+
+#define WACOM_DIRECT_DEVICE(f) (((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_WT_TOUCHSCREEN) || \
+ ((f)->application == HID_DG_PEN) || \
+ ((f)->application == WACOM_HID_WD_PEN))
enum {
PENPARTNER = 0,
@@ -170,6 +195,7 @@ enum {
INTUOSPS,
INTUOSPM,
INTUOSPL,
+ INTUOSP2_BT,
WACOM_21UX2,
WACOM_22HD,
DTK,
@@ -232,7 +258,6 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
- bool input_event_flag;
};
struct wacom_shared {
@@ -244,6 +269,7 @@ struct wacom_shared {
struct input_dev *touch_input;
struct hid_device *pen;
struct hid_device *touch;
+ bool has_mute_touch_switch;
};
struct hid_data {
@@ -300,6 +326,7 @@ struct wacom_wac {
int mode_report;
int mode_value;
struct hid_data hid_data;
+ bool has_mute_touch_switch;
};
#endif
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 446802ae8f1b..b44b32f21e61 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -135,9 +135,9 @@ u64 hv_do_hypercall(u64 control, void *input, void *output)
EXPORT_SYMBOL_GPL(hv_do_hypercall);
#ifdef CONFIG_X86_64
-static cycle_t read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_clock_tsc(struct clocksource *arg)
{
- cycle_t current_tick;
+ u64 current_tick;
struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
if (tsc_pg->tsc_sequence != 0) {
@@ -146,7 +146,7 @@ static cycle_t read_hv_clock_tsc(struct clocksource *arg)
*/
while (1) {
- cycle_t tmp;
+ u64 tmp;
u32 sequence = tsc_pg->tsc_sequence;
u64 cur_tsc;
u64 scale = tsc_pg->tsc_scale;
@@ -350,7 +350,7 @@ int hv_post_message(union hv_connection_id connection_id,
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- cycle_t current_tick;
+ u64 current_tick;
WARN_ON(!clockevent_state_oneshot(evt));
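
The cycle_t to u64 conversion above runs through the TSC reference-page loop, which is a lock-free sequence check: sample the sequence number, compute the scaled TSC plus offset, and retry if the hypervisor bumped the sequence in the meantime. A standalone sketch of that pattern over a hypothetical reference page; the 64.64 fixed-point multiply mirrors the Hyper-V formula:

#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Hypothetical reference page, same shape as the loop in the hunk above. */
struct example_ref_page {
        u32 sequence;
        u64 scale;
        s64 offset;
};

static u64 example_read_ref_clock(struct example_ref_page *pg,
                                  u64 (*read_tsc)(void))
{
        u64 scale, tick;
        s64 offset;
        u32 seq;

        do {
                seq    = READ_ONCE(pg->sequence);   /* 0 would mean "invalid" */
                scale  = READ_ONCE(pg->scale);
                offset = READ_ONCE(pg->offset);
                /* 64.64 fixed-point: (tsc * scale) >> 64, then add the offset */
                tick = mul_u64_u64_shr(read_tsc(), scale, 64) + offset;
                /* retry if the host updated the page mid-read */
        } while (READ_ONCE(pg->sequence) != seq);

        return tick;
}
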
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 322ed9272811..841f2428e84a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = {
};
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
-static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 };
+static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 3fe368b23d15..a51b6b64ecdf 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -804,10 +804,10 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (!etm_count++) {
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
- "AP_ARM_CORESIGHT_STARTING",
+ "arm/coresight:starting",
etm_starting_cpu, etm_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "AP_ARM_CORESIGHT_ONLINE",
+ "arm/coresight:online",
etm_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4db8d6a4d0cb..031480f2c34d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -986,11 +986,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(dev, "ETM arch init failed\n");
if (!etm4_count++) {
- cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
- "AP_ARM_CORESIGHT4_STARTING",
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "arm/coresight4:starting",
etm4_starting_cpu, etm4_dying_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "AP_ARM_CORESIGHT4_ONLINE",
+ "arm/coresight4:online",
etm4_online_cpu, NULL);
if (ret < 0)
goto err_arch_supported;
@@ -1037,7 +1037,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
- cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
}
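
Both coresight hunks only rename the cpuhp state strings; the registration pattern stays the same: a fixed STARTING state plus a dynamically allocated ONLINE state whose returned id has to be kept for later removal. A minimal sketch with placeholder callbacks:

#include <linux/cpuhotplug.h>

static int example_online_cpu(unsigned int cpu)  { return 0; }
static int example_offline_cpu(unsigned int cpu) { return 0; }

static int example_hp_online;   /* dynamic state id, needed for teardown */

static int example_register_hotplug(void)
{
        int ret;

        /* CPUHP_AP_ONLINE_DYN returns the allocated state (> 0) on success */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "example/driver:online",
                                        example_online_cpu,
                                        example_offline_cpu);
        if (ret < 0)
                return ret;

        example_hp_online = ret;
        return 0;
}

static void example_unregister_hotplug(void)
{
        if (example_hp_online)
                cpuhp_remove_state_nocalls(example_hp_online);
}
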
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 944c17b48d23..e4c55c5f9988 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -406,7 +406,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
return 0;
}
-static ssize_t stm_generic_packet(struct stm_data *stm_data,
+static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
unsigned int master,
unsigned int channel,
unsigned int packet,
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index e1aee61dd7b3..b03444624648 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -67,10 +67,13 @@ static void sth_iowrite(void __iomem *dest, const unsigned char *payload,
}
}
-static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
- unsigned int channel, unsigned int packet,
- unsigned int flags, unsigned int size,
- const unsigned char *payload)
+static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
+ unsigned int master,
+ unsigned int channel,
+ unsigned int packet,
+ unsigned int flags,
+ unsigned int size,
+ const unsigned char *payload)
{
struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
struct intel_th_channel __iomem *out =
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 847a39b35307..723e2d90083d 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -39,4 +39,15 @@ config STM_SOURCE_HEARTBEAT
If you want to send heartbeat messages over STM devices,
say Y.
+config STM_SOURCE_FTRACE
+ tristate "Copy the output from kernel Ftrace to STM engine"
+ depends on FUNCTION_TRACER
+ help
+	  This option can be used to copy the output from kernel Ftrace
+	  to an STM engine. Enabling this option will introduce a slight
+	  timing overhead.
+
+ If you want to send kernel Ftrace messages over STM devices,
+ say Y.
+
endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index a9ce3d487e57..3abd84ce13d4 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o
+obj-$(CONFIG_STM_SOURCE_FTRACE) += stm_ftrace.o
stm_console-y := console.o
stm_heartbeat-y := heartbeat.o
+stm_ftrace-y := ftrace.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index a6ea387b5b00..0e731143f6a4 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -427,7 +427,7 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
return ret;
}
-static ssize_t stm_write(struct stm_data *data, unsigned int master,
+static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
unsigned int channel, const char *buf, size_t count)
{
unsigned int flags = STP_PACKET_TIMESTAMPED;
@@ -1123,8 +1123,9 @@ void stm_source_unregister_device(struct stm_source_data *data)
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
-int stm_source_write(struct stm_source_data *data, unsigned int chan,
- const char *buf, size_t count)
+int notrace stm_source_write(struct stm_source_data *data,
+ unsigned int chan,
+ const char *buf, size_t count)
{
struct stm_source_device *src = data->src;
struct stm_device *stm;
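
stm_source_write() and the packet callbacks gain the notrace annotation because, once stm_ftrace is registered as a trace export, they sit on the ftrace output path; if ftrace could instrument them, every record written to STM would re-enter the tracer. The annotation itself is just a function attribute, sketched here on a placeholder sink:

#include <linux/compiler.h>     /* provides the notrace attribute */
#include <linux/types.h>

/*
 * Anything callable while emitting an ftrace record must not be traced
 * itself, or the export would recurse. Placeholder sink function.
 */
static ssize_t notrace example_sink_write(const char *buf, size_t count)
{
        /* forward the record to the trace hardware here */
        return count;
}
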
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index a86612d989f9..c5f94ca31c4d 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/stm.h>
-static ssize_t
+static ssize_t notrace
dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
unsigned int channel, unsigned int packet, unsigned int flags,
unsigned int size, const unsigned char *payload)
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
new file mode 100644
index 000000000000..bd126a7c6da2
--- /dev/null
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -0,0 +1,87 @@
+/*
+ * Simple kernel driver to link kernel Ftrace and an STM device
+ * Copyright (c) 2016, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * STM Ftrace will be registered as a trace_export.
+ */
+
+#include <linux/module.h>
+#include <linux/stm.h>
+#include <linux/trace.h>
+
+#define STM_FTRACE_NR_CHANNELS 1
+#define STM_FTRACE_CHAN 0
+
+static int stm_ftrace_link(struct stm_source_data *data);
+static void stm_ftrace_unlink(struct stm_source_data *data);
+
+static struct stm_ftrace {
+ struct stm_source_data data;
+ struct trace_export ftrace;
+} stm_ftrace = {
+ .data = {
+ .name = "ftrace",
+ .nr_chans = STM_FTRACE_NR_CHANNELS,
+ .link = stm_ftrace_link,
+ .unlink = stm_ftrace_unlink,
+ },
+};
+
+/**
+ * stm_ftrace_write() - write data to STM via 'stm_ftrace' source
+ * @buf: buffer containing the data packet
+ * @len: length of the data packet
+ */
+static void notrace
+stm_ftrace_write(const void *buf, unsigned int len)
+{
+ stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+}
+
+static int stm_ftrace_link(struct stm_source_data *data)
+{
+ struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+ sf->ftrace.write = stm_ftrace_write;
+
+ return register_ftrace_export(&sf->ftrace);
+}
+
+static void stm_ftrace_unlink(struct stm_source_data *data)
+{
+ struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+ unregister_ftrace_export(&sf->ftrace);
+}
+
+static int __init stm_ftrace_init(void)
+{
+ int ret;
+
+ ret = stm_source_register_device(NULL, &stm_ftrace.data);
+ if (ret)
+ pr_err("Failed to register stm_source - ftrace.\n");
+
+ return ret;
+}
+
+static void __exit stm_ftrace_exit(void)
+{
+ stm_source_unregister_device(&stm_ftrace.data);
+}
+
+module_init(stm_ftrace_init);
+module_exit(stm_ftrace_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_ftrace driver");
+MODULE_AUTHOR("Chunyan Zhang <zhang.chunyan@linaro.org>");
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 11edabf425ae..efc3354d60ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -7,6 +7,7 @@ menu "I2C support"
config I2C
tristate "I2C support"
select RT_MUTEXES
+ select IRQ_DOMAIN
---help---
I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
many micro controller applications and developed by Philips. SMBus,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d252276feadf..0cdc8443deab 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -426,7 +426,7 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64
+ depends on ARCH_ZYNQ || ARM64 || XTENSA
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -597,6 +597,16 @@ config I2C_IMX
This driver can also be built as a module. If so, the module
will be called i2c-imx.
+config I2C_IMX_LPI2C
+ tristate "IMX Low Power I2C interface"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Say Y here if you want to use the Low Power IIC bus controller
+ on the Freescale i.MX processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-imx-lpi2c.
+
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IXP4XX || ARCH_IOP13XX
@@ -763,7 +773,7 @@ config I2C_PUV3
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
- depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
+ depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF)
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
@@ -1150,6 +1160,17 @@ config I2C_ELEKTOR
This support is also available as a module. If so, the module
will be called i2c-elektor.
+config I2C_MLXCPLD
+ tristate "Mellanox I2C driver"
+ depends on X86_64
+ help
+	  This exposes the Mellanox platform I2C busses to the Linux I2C layer
+	  for x86-based systems.
+	  The controller is implemented as CPLD logic.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called i2c-mlxcpld.
+
config I2C_PCA_ISA
tristate "PCA9564/PCA9665 on an ISA bus"
depends on ISA
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 29764cc20a44..1c1bac87a9db 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
+obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_BRCMSTB) += i2c-brcmstb.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+obj-$(CONFIG_I2C_MLXCPLD) += i2c-mlxcpld.o
obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 4351a9343058..13f07482ec68 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -489,7 +489,7 @@ static const struct i2c_algorithm axxia_i2c_algo = {
.functionality = axxia_i2c_func,
};
-static struct i2c_adapter_quirks axxia_i2c_quirks = {
+static const struct i2c_adapter_quirks axxia_i2c_quirks = {
.max_read_len = 255,
.max_write_len = 255,
};
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 326b3db02c48..318df559adc5 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -395,7 +395,7 @@ static const struct i2c_algorithm bcm_iproc_algo = {
.functionality = bcm_iproc_i2c_functionality,
};
-static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
/* need to reserve one byte in the FIFO for the slave address */
.max_read_len = M_TX_RX_FIFO_SIZE - 1,
};
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d4f3239b5686..c3436f627028 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,20 +50,19 @@
#define BCM2835_I2C_S_CLKT BIT(9)
#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
-#define BCM2835_I2C_BITMSK_S 0x03FF
-
#define BCM2835_I2C_CDIV_MIN 0x0002
#define BCM2835_I2C_CDIV_MAX 0xFFFE
-#define BCM2835_I2C_TIMEOUT (msecs_to_jiffies(1000))
-
struct bcm2835_i2c_dev {
struct device *dev;
void __iomem *regs;
struct clk *clk;
int irq;
+ u32 bus_clk_rate;
struct i2c_adapter adapter;
struct completion completion;
+ struct i2c_msg *curr_msg;
+ int num_msgs;
u32 msg_err;
u8 *msg_buf;
size_t msg_buf_remaining;
@@ -80,6 +79,30 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
return readl(i2c_dev->regs + reg);
}
+static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 divider;
+
+ divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
+ i2c_dev->bus_clk_rate);
+ /*
+ * Per the datasheet, the register is always interpreted as an even
+ * number, by rounding down. In other words, the LSB is ignored. So,
+ * if the LSB is set, increment the divider to avoid any issue.
+ */
+ if (divider & 1)
+ divider++;
+ if ((divider < BCM2835_I2C_CDIV_MIN) ||
+ (divider > BCM2835_I2C_CDIV_MAX)) {
+ dev_err_ratelimited(i2c_dev->dev, "Invalid clock-frequency\n");
+ return -EINVAL;
+ }
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+
+ return 0;
+}
+
static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev)
{
u32 val;
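
bcm2835_i2c_set_divider() now recomputes the divider per transfer from the stored bus_clk_rate. The even-rounding rule is easy to check with concrete (hypothetical) numbers: a 250 MHz parent clock at a 400 kHz bus rate gives DIV_ROUND_UP(250000000, 400000) = 625, which is odd, so the divider becomes 626. The computation stripped to its essentials:

#include <linux/errno.h>
#include <linux/kernel.h>       /* DIV_ROUND_UP */
#include <linux/types.h>

/*
 * The controller ignores the divider LSB (rounds down), which would make an
 * odd divider clock the bus faster than requested; bump it to the next even
 * value instead. Example (hypothetical rates): 250 MHz / 400 kHz -> 625 -> 626.
 */
static int example_i2c_divider(u32 parent_hz, u32 bus_hz, u32 *out)
{
        u32 divider = DIV_ROUND_UP(parent_hz, bus_hz);

        if (divider & 1)
                divider++;
        if (divider < 0x0002 || divider > 0xFFFE)
                return -EINVAL;

        *out = divider;
        return 0;
}
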
@@ -110,106 +133,159 @@ static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev)
}
}
+/*
+ * Repeated Start Condition (Sr)
+ * The BCM2835 ARM Peripherals datasheet mentions a way to trigger a Sr when it
+ * talks about reading from a slave with a 10 bit address. This is achieved by
+ * issuing a write, polling the I2CS.TA flag until it is set, and then issuing
+ * a read.
+ * A comment in https://github.com/raspberrypi/linux/issues/254 shows how the
+ * firmware actually does it using polling and says that it's a workaround for
+ * a problem in the state machine.
+ * It turns out that it is possible to use the TXW interrupt to know when the
+ * transfer is active, provided the FIFO has not been prefilled.
+ */
+
+static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
+{
+ u32 c = BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN;
+ struct i2c_msg *msg = i2c_dev->curr_msg;
+ bool last_msg = (i2c_dev->num_msgs == 1);
+
+ if (!i2c_dev->num_msgs)
+ return;
+
+ i2c_dev->num_msgs--;
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+
+ if (msg->flags & I2C_M_RD)
+ c |= BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
+ else
+ c |= BCM2835_I2C_C_INTT;
+
+ if (last_msg)
+ c |= BCM2835_I2C_C_INTD;
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+}
+
+/*
+ * Note about I2C_C_CLEAR on error:
+ * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
+ * non-idle state and I2C_C_READ, it sets an abort_rx flag and runs through
+ * the state machine to send a NACK and a STOP. Since we're setting CLEAR
+ * without I2CEN, that NACK will be hanging around queued up for next time
+ * we start the engine.
+ */
+
static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
{
struct bcm2835_i2c_dev *i2c_dev = data;
u32 val, err;
val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
- val &= BCM2835_I2C_BITMSK_S;
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, val);
err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR);
if (err) {
i2c_dev->msg_err = err;
- complete(&i2c_dev->completion);
- return IRQ_HANDLED;
- }
-
- if (val & BCM2835_I2C_S_RXD) {
- bcm2835_drain_rxfifo(i2c_dev);
- if (!(val & BCM2835_I2C_S_DONE))
- return IRQ_HANDLED;
+ goto complete;
}
if (val & BCM2835_I2C_S_DONE) {
- if (i2c_dev->msg_buf_remaining)
+ if (i2c_dev->curr_msg->flags & I2C_M_RD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ }
+
+ if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
i2c_dev->msg_err = BCM2835_I2C_S_LEN;
else
i2c_dev->msg_err = 0;
- complete(&i2c_dev->completion);
- return IRQ_HANDLED;
+ goto complete;
}
- if (val & BCM2835_I2C_S_TXD) {
+ if (val & BCM2835_I2C_S_TXW) {
+ if (!i2c_dev->msg_buf_remaining) {
+ i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+ goto complete;
+ }
+
bcm2835_fill_txfifo(i2c_dev);
+
+ if (i2c_dev->num_msgs && !i2c_dev->msg_buf_remaining) {
+ i2c_dev->curr_msg++;
+ bcm2835_i2c_start_transfer(i2c_dev);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ if (val & BCM2835_I2C_S_RXR) {
+ if (!i2c_dev->msg_buf_remaining) {
+ i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+ goto complete;
+ }
+
+ bcm2835_drain_rxfifo(i2c_dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
+
+complete:
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, BCM2835_I2C_S_CLKT |
+ BCM2835_I2C_S_ERR | BCM2835_I2C_S_DONE);
+ complete(&i2c_dev->completion);
+
+ return IRQ_HANDLED;
}
-static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
- struct i2c_msg *msg)
+static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
{
- u32 c;
+ struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
unsigned long time_left;
+ int i, ret;
- i2c_dev->msg_buf = msg->buf;
- i2c_dev->msg_buf_remaining = msg->len;
- reinit_completion(&i2c_dev->completion);
+ for (i = 0; i < (num - 1); i++)
+ if (msgs[i].flags & I2C_M_RD) {
+ dev_warn_once(i2c_dev->dev,
+ "only one read message supported, has to be last\n");
+ return -EOPNOTSUPP;
+ }
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ ret = bcm2835_i2c_set_divider(i2c_dev);
+ if (ret)
+ return ret;
- if (msg->flags & I2C_M_RD) {
- c = BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
- } else {
- c = BCM2835_I2C_C_INTT;
- bcm2835_fill_txfifo(i2c_dev);
- }
- c |= BCM2835_I2C_C_ST | BCM2835_I2C_C_INTD | BCM2835_I2C_C_I2CEN;
+ i2c_dev->curr_msg = msgs;
+ i2c_dev->num_msgs = num;
+ reinit_completion(&i2c_dev->completion);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+ bcm2835_i2c_start_transfer(i2c_dev);
time_left = wait_for_completion_timeout(&i2c_dev->completion,
- BCM2835_I2C_TIMEOUT);
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+ adap->timeout);
if (!time_left) {
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
+ BCM2835_I2C_C_CLEAR);
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
return -ETIMEDOUT;
}
- if (likely(!i2c_dev->msg_err))
- return 0;
+ if (!i2c_dev->msg_err)
+ return num;
- if ((i2c_dev->msg_err & BCM2835_I2C_S_ERR) &&
- (msg->flags & I2C_M_IGNORE_NAK))
- return 0;
-
- dev_err(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
+ dev_dbg(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
return -EREMOTEIO;
- else
- return -EIO;
-}
-
-static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
- int num)
-{
- struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
- int i;
- int ret = 0;
-
- for (i = 0; i < num; i++) {
- ret = bcm2835_i2c_xfer_msg(i2c_dev, &msgs[i]);
- if (ret)
- break;
- }
- return ret ?: i;
+ return -EIO;
}
static u32 bcm2835_i2c_func(struct i2c_adapter *adap)
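
With the rewrite above, a write message followed by a trailing read message is carried out back to back with a repeated start; a read message anywhere but last is rejected with -EOPNOTSUPP. From userspace, that combined transfer is what the I2C_RDWR ioctl produces; an illustrative example against a made-up device address and register:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Write one register address, then read two bytes with a repeated start. */
int main(void)
{
        uint8_t reg = 0x10, val[2];             /* made-up register */
        struct i2c_msg msgs[2] = {
                { .addr = 0x48, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x48, .flags = I2C_M_RD, .len = 2, .buf = val  },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
        int fd = open("/dev/i2c-1", O_RDWR);

        if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
                perror("i2c transfer");
                return 1;
        }
        printf("read 0x%02x 0x%02x\n", val[0], val[1]);
        close(fd);
        return 0;
}
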
@@ -235,7 +311,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
struct resource *mem, *irq;
- u32 bus_clk_rate, divider;
int ret;
struct i2c_adapter *adap;
@@ -259,27 +334,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &bus_clk_rate);
+ &i2c_dev->bus_clk_rate);
if (ret < 0) {
dev_warn(&pdev->dev,
"Could not read clock-frequency property\n");
- bus_clk_rate = 100000;
- }
-
- divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), bus_clk_rate);
- /*
- * Per the datasheet, the register is always interpreted as an even
- * number, by rounding down. In other words, the LSB is ignored. So,
- * if the LSB is set, increment the divider to avoid any issue.
- */
- if (divider & 1)
- divider++;
- if ((divider < BCM2835_I2C_CDIV_MIN) ||
- (divider > BCM2835_I2C_CDIV_MAX)) {
- dev_err(&pdev->dev, "Invalid clock-frequency\n");
- return -ENODEV;
+ i2c_dev->bus_clk_rate = 100000;
}
- bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index b403fa5ecf49..6d81c56184d3 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -536,6 +536,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
intr_mask = DW_IC_INTR_DEFAULT_MASK;
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+ u32 flags = msgs[dev->msg_write_idx].flags;
+
/*
* if target address has changed, we need to
* reprogram the target address in the i2c
@@ -581,8 +583,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
* detected from the registers so we set it always
* when writing/reading the last byte.
*/
+
+ /*
+ * i2c-core.c always sets the buffer length of
+ * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
+ * be adjusted when receiving the first byte.
+ * Thus we can't stop the transaction here.
+ */
if (dev->msg_write_idx == dev->msgs_num - 1 &&
- buf_len == 1)
+ buf_len == 1 && !(flags & I2C_M_RECV_LEN))
cmd |= BIT(9);
if (need_restart) {
@@ -607,7 +616,12 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dev->tx_buf = buf;
dev->tx_buf_len = buf_len;
- if (buf_len > 0) {
+ /*
+ * Because we don't know the buffer length in the
+ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+ * the transaction here.
+ */
+ if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
@@ -628,6 +642,24 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dw_writel(dev, intr_mask, DW_IC_INTR_MASK);
}
+static u8
+i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ /*
+ * Adjust the buffer length and mask the flag
+ * after receiving the first byte.
+ */
+ len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
+ dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
+ return len;
+}
+
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
@@ -652,7 +684,15 @@ i2c_dw_read(struct dw_i2c_dev *dev)
rx_valid = dw_readl(dev, DW_IC_RXFLR);
for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
- *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ *buf = dw_readl(dev, DW_IC_DATA_CMD);
+ /* Ensure length byte is a valid value */
+ if (flags & I2C_M_RECV_LEN &&
+ *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) {
+ len = i2c_dw_recv_len(dev, *buf);
+ }
+ buf++;
dev->rx_outstanding--;
}
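
The I2C_M_RECV_LEN handling added above is what makes I2C_FUNC_SMBUS_BLOCK_DATA work on this controller: the first byte received is the block length, and the transfer length is stretched on the fly. On the userspace side this corresponds to an SMBus block read; a sketch assuming the libi2c helper shipped with i2c-tools and a made-up slave address:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c-dev.h>
#include <i2c/smbus.h>          /* libi2c from i2c-tools (assumed available) */

int main(void)
{
        uint8_t data[I2C_SMBUS_BLOCK_MAX];      /* length byte sets the size */
        int fd = open("/dev/i2c-0", O_RDWR);
        int len;

        if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0)   /* made-up address */
                return 1;

        /* command 0x00 is a placeholder; returns the number of bytes read */
        len = i2c_smbus_read_block_data(fd, 0x00, data);
        if (len < 0)
                return 1;

        printf("block of %d bytes\n", len);
        close(fd);
        return 0;
}
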
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 0d44d2ae7d4c..26250b425e2f 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -22,6 +22,14 @@
*
*/
+#include <linux/i2c.h>
+
+#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
+ I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 96f8230cd2d3..d6423cfac588 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -71,12 +71,6 @@ struct dw_pci_controller {
DW_IC_CON_SLAVE_DISABLE | \
DW_IC_CON_RESTART_EN)
-#define DW_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
- I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | \
- I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK)
-
/* Merrifield HCNT/LCNT/SDA hold time */
static struct dw_scl_sda_cfg mrfld_config = {
.ss_hcnt = 0x2f8,
@@ -147,6 +141,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 32,
.rx_fifo_depth = 32,
+ .functionality = I2C_FUNC_10BIT_ADDR,
.clk_khz = 25000,
.setup = mfld_setup,
},
@@ -155,6 +150,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 64,
.rx_fifo_depth = 64,
+ .functionality = I2C_FUNC_10BIT_ADDR,
.scl_sda_cfg = &mrfld_config,
.setup = mrfld_setup,
},
@@ -249,7 +245,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
}
dev->functionality = controller->functionality |
- DW_DEFAULT_FUNCTIONALITY;
+ DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = controller->bus_cfg;
if (controller->scl_sda_cfg) {
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0b42a12171f3..6ce431323125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -150,6 +150,29 @@ static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare)
return 0;
}
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+	 * Try to detect the FIFO depth if it is not set by the interface
+	 * driver; per the HW spec the depth can range from 2 to 256.
+ */
+ param = i2c_dw_read_comp_param(dev);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ dev->adapter.nr = id;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -176,9 +199,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->irq = irq;
platform_set_drvdata(pdev, dev);
- /* fast mode by default because of legacy reasons */
- dev->clk_freq = 400000;
-
if (pdata) {
dev->clk_freq = pdata->i2c_scl_freq;
} else {
@@ -193,8 +213,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
- if (acpi_speed)
- dev->clk_freq = acpi_speed;
+ /*
+	 * Find the bus speed from the "clock-frequency" device property or
+	 * ACPI, falling back to fast mode (400 kHz) if neither is set.
+ */
+ if (acpi_speed && dev->clk_freq)
+ dev->clk_freq = min(dev->clk_freq, acpi_speed);
+ else if (acpi_speed || dev->clk_freq)
+ dev->clk_freq = max(dev->clk_freq, acpi_speed);
+ else
+ dev->clk_freq = 400000;
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
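
The new speed selection takes the slower value when both the "clock-frequency" property and ACPI report one, uses whichever exists when only one does, and falls back to 400 kHz fast mode otherwise. The same rule as a tiny helper with hypothetical names:

#include <linux/kernel.h>       /* min(), max() */
#include <linux/types.h>

/* 0 means "not provided" for either source. */
static u32 example_pick_bus_speed(u32 property_hz, u32 acpi_hz)
{
        if (property_hz && acpi_hz)
                return min(property_hz, acpi_hz);   /* both set: be conservative */
        if (property_hz || acpi_hz)
                return max(property_hz, acpi_hz);   /* one set: max() picks it */
        return 400000;                              /* neither: fast mode */
}
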
@@ -214,13 +242,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (r)
return r;
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_10BIT_ADDR |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
DW_IC_CON_RESTART_EN;
@@ -246,13 +268,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
1000000);
}
- if (!dev->tx_fifo_depth) {
- u32 param1 = i2c_dw_read_comp_param(dev);
-
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- dev->adapter.nr = pdev->id;
- }
+ dw_i2c_set_fifo_size(dev, pdev->id);
adap = &dev->adapter;
adap->owner = THIS_MODULE;
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 8acda2aa1558..69075a32073e 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -182,7 +182,7 @@ static const struct i2c_algorithm dln2_i2c_usb_algorithm = {
.functionality = dln2_i2c_func,
};
-static struct i2c_adapter_quirks dln2_i2c_quirks = {
+static const struct i2c_adapter_quirks dln2_i2c_quirks = {
.max_read_len = DLN2_I2C_MAX_XFER_SIZE,
.max_write_len = DLN2_I2C_MAX_XFER_SIZE,
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index eb3627f35d12..e242db43774b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -118,7 +118,6 @@
#define SMBSLVSTS(p) (16 + (p)->smba) /* ICH3 and later */
#define SMBSLVCMD(p) (17 + (p)->smba) /* ICH3 and later */
#define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */
-#define SMBNTFDDAT(p) (22 + (p)->smba) /* ICH3 and later */
/* PCI Address Constants */
#define SMBBAR 4
@@ -137,27 +136,27 @@
#define SBREG_SMBCTRL 0xc6000c
/* Host status bits for SMBPCISTS */
-#define SMBPCISTS_INTS 0x08
+#define SMBPCISTS_INTS BIT(3)
/* Control bits for SMBPCICTL */
-#define SMBPCICTL_INTDIS 0x0400
+#define SMBPCICTL_INTDIS BIT(10)
/* Host configuration bits for SMBHSTCFG */
-#define SMBHSTCFG_HST_EN 1
-#define SMBHSTCFG_SMB_SMI_EN 2
-#define SMBHSTCFG_I2C_EN 4
-#define SMBHSTCFG_SPD_WD 0x10
+#define SMBHSTCFG_HST_EN BIT(0)
+#define SMBHSTCFG_SMB_SMI_EN BIT(1)
+#define SMBHSTCFG_I2C_EN BIT(2)
+#define SMBHSTCFG_SPD_WD BIT(4)
/* TCO configuration bits for TCOCTL */
-#define TCOCTL_EN 0x0100
+#define TCOCTL_EN BIT(8)
/* Auxiliary status register bits, ICH4+ only */
-#define SMBAUXSTS_CRCE 1
-#define SMBAUXSTS_STCO 2
+#define SMBAUXSTS_CRCE BIT(0)
+#define SMBAUXSTS_STCO BIT(1)
/* Auxiliary control register bits, ICH4+ only */
-#define SMBAUXCTL_CRC 1
-#define SMBAUXCTL_E32B 2
+#define SMBAUXCTL_CRC BIT(0)
+#define SMBAUXCTL_E32B BIT(1)
/* Other settings */
#define MAX_RETRIES 400
@@ -172,27 +171,27 @@
#define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */
/* I801 Host Control register bits */
-#define SMBHSTCNT_INTREN 0x01
-#define SMBHSTCNT_KILL 0x02
-#define SMBHSTCNT_LAST_BYTE 0x20
-#define SMBHSTCNT_START 0x40
-#define SMBHSTCNT_PEC_EN 0x80 /* ICH3 and later */
+#define SMBHSTCNT_INTREN BIT(0)
+#define SMBHSTCNT_KILL BIT(1)
+#define SMBHSTCNT_LAST_BYTE BIT(5)
+#define SMBHSTCNT_START BIT(6)
+#define SMBHSTCNT_PEC_EN BIT(7) /* ICH3 and later */
/* I801 Hosts Status register bits */
-#define SMBHSTSTS_BYTE_DONE 0x80
-#define SMBHSTSTS_INUSE_STS 0x40
-#define SMBHSTSTS_SMBALERT_STS 0x20
-#define SMBHSTSTS_FAILED 0x10
-#define SMBHSTSTS_BUS_ERR 0x08
-#define SMBHSTSTS_DEV_ERR 0x04
-#define SMBHSTSTS_INTR 0x02
-#define SMBHSTSTS_HOST_BUSY 0x01
+#define SMBHSTSTS_BYTE_DONE BIT(7)
+#define SMBHSTSTS_INUSE_STS BIT(6)
+#define SMBHSTSTS_SMBALERT_STS BIT(5)
+#define SMBHSTSTS_FAILED BIT(4)
+#define SMBHSTSTS_BUS_ERR BIT(3)
+#define SMBHSTSTS_DEV_ERR BIT(2)
+#define SMBHSTSTS_INTR BIT(1)
+#define SMBHSTSTS_HOST_BUSY BIT(0)
-/* Host Notify Status registers bits */
-#define SMBSLVSTS_HST_NTFY_STS 1
+/* Host Notify Status register bits */
+#define SMBSLVSTS_HST_NTFY_STS BIT(0)
-/* Host Notify Command registers bits */
-#define SMBSLVCMD_HST_NTFY_INTREN 0x01
+/* Host Notify Command register bits */
+#define SMBSLVCMD_HST_NTFY_INTREN BIT(0)
#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
SMBHSTSTS_DEV_ERR)
@@ -243,6 +242,7 @@ struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -269,20 +269,17 @@ struct i801_priv {
*/
bool acpi_reserved;
struct mutex acpi_lock;
- struct smbus_host_notify *host_notify;
};
-#define SMBHSTNTFY_SIZE 8
-
-#define FEATURE_SMBUS_PEC (1 << 0)
-#define FEATURE_BLOCK_BUFFER (1 << 1)
-#define FEATURE_BLOCK_PROC (1 << 2)
-#define FEATURE_I2C_BLOCK_READ (1 << 3)
-#define FEATURE_IRQ (1 << 4)
-#define FEATURE_HOST_NOTIFY (1 << 5)
+#define FEATURE_SMBUS_PEC BIT(0)
+#define FEATURE_BLOCK_BUFFER BIT(1)
+#define FEATURE_BLOCK_PROC BIT(2)
+#define FEATURE_I2C_BLOCK_READ BIT(3)
+#define FEATURE_IRQ BIT(4)
+#define FEATURE_HOST_NOTIFY BIT(5)
/* Not really a feature, but it's convenient to handle it as such */
-#define FEATURE_IDF (1 << 15)
-#define FEATURE_TCO (1 << 16)
+#define FEATURE_IDF BIT(15)
+#define FEATURE_TCO BIT(16)
static const char *i801_feature_names[] = {
"SMBus PEC",
@@ -582,12 +579,15 @@ static void i801_isr_byte_done(struct i801_priv *priv)
static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
{
unsigned short addr;
- unsigned int data;
addr = inb_p(SMBNTFDADD(priv)) >> 1;
- data = inw_p(SMBNTFDDAT(priv));
- i2c_handle_smbus_host_notify(priv->host_notify, addr, data);
+ /*
+ * With the tested platforms, reading SMBNTFDDAT (22 + (p)->smba)
+ * always returns 0. Our current implementation doesn't provide
+ * data, so we just ignore it.
+ */
+ i2c_handle_smbus_host_notify(&priv->adapter, addr);
/* clear Host Notify bit and return */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -950,23 +950,29 @@ static u32 i801_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_HOST_NOTIFY : 0);
}
-static int i801_enable_host_notify(struct i2c_adapter *adapter)
+static void i801_enable_host_notify(struct i2c_adapter *adapter)
{
struct i801_priv *priv = i2c_get_adapdata(adapter);
if (!(priv->features & FEATURE_HOST_NOTIFY))
- return -ENOTSUPP;
+ return;
- if (!priv->host_notify)
- priv->host_notify = i2c_setup_smbus_host_notify(adapter);
- if (!priv->host_notify)
- return -ENOMEM;
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
+ if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+ SMBSLVCMD(priv));
- outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+}
- return 0;
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+ if (!(priv->features & FEATURE_HOST_NOTIFY))
+ return;
+
+ outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
}
static const struct i2c_algorithm smbus_algorithm = {
@@ -1633,14 +1639,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return err;
}
- /*
- * Enable Host Notify for chips that supports it.
- * It is done after i2c_add_adapter() so that we are sure the work queue
- * is not used if i2c_add_adapter() fails.
- */
- err = i801_enable_host_notify(&priv->adapter);
- if (err && err != -ENOTSUPP)
- dev_warn(&dev->dev, "Unable to enable SMBus Host Notify\n");
+ i801_enable_host_notify(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
@@ -1663,6 +1662,7 @@ static void i801_remove(struct pci_dev *dev)
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
+ i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
i801_acpi_remove(priv);
@@ -1690,11 +1690,8 @@ static int i801_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct i801_priv *priv = pci_get_drvdata(pci_dev);
- int err;
- err = i801_enable_host_notify(&priv->adapter);
- if (err && err != -ENOTSUPP)
- dev_warn(dev, "Unable to enable SMBus Host Notify\n");
+ i801_enable_host_notify(&priv->adapter);
return 0;
}
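The i801 changes above replace the unconditional Host Notify enable with a save-and-restore of SMBSLVCMD: the original register value is captured when Host Notify is enabled and written back when the device is removed. A small sketch of that pattern against a mocked register follows; the variable and function names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define HST_NTFY_INTREN 0x01u	/* bit 0, as in the patch */

static uint8_t slvcmd_reg;	/* stands in for the SMBSLVCMD register */
static uint8_t original_slvcmd;	/* saved at enable time */

static void host_notify_enable(void)
{
	original_slvcmd = slvcmd_reg;
	if (!(slvcmd_reg & HST_NTFY_INTREN))
		slvcmd_reg = original_slvcmd | HST_NTFY_INTREN;
}

static void host_notify_disable(void)
{
	/* restore whatever was programmed before we touched the register */
	slvcmd_reg = original_slvcmd;
}

int main(void)
{
	slvcmd_reg = 0x04;	/* pretend firmware had set some other bit */
	host_notify_enable();
	printf("enabled:  0x%02x\n", slvcmd_reg);	/* 0x05 */
	host_notify_disable();
	printf("restored: 0x%02x\n", slvcmd_reg);	/* 0x04 */
	return 0;
}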
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
new file mode 100644
index 000000000000..c62b7cd475f8
--- /dev/null
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -0,0 +1,652 @@
+/*
+ * This is the i.MX low power I2C controller driver.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "imx-lpi2c"
+
+#define LPI2C_PARAM 0x04 /* i2c RX/TX FIFO size */
+#define LPI2C_MCR	0x10	/* i2c control register */
+#define LPI2C_MSR 0x14 /* i2c status register */
+#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
+#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
+#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
+#define LPI2C_MCFGR3 0x2C /* i2c master configuration */
+#define LPI2C_MCCR0 0x48 /* i2c master clk configuration */
+#define LPI2C_MCCR1 0x50 /* i2c master clk configuration */
+#define LPI2C_MFCR 0x58 /* i2c master FIFO control */
+#define LPI2C_MFSR 0x5C /* i2c master FIFO status */
+#define LPI2C_MTDR 0x60 /* i2c master TX data register */
+#define LPI2C_MRDR 0x70 /* i2c master RX data register */
+
+/* i2c command */
+#define TRAN_DATA 0X00
+#define RECV_DATA 0X01
+#define GEN_STOP 0X02
+#define RECV_DISCARD 0X03
+#define GEN_START 0X04
+#define START_NACK 0X05
+#define START_HIGH 0X06
+#define START_HIGH_NACK 0X07
+
+#define MCR_MEN BIT(0)
+#define MCR_RST BIT(1)
+#define MCR_DOZEN BIT(2)
+#define MCR_DBGEN BIT(3)
+#define MCR_RTF BIT(8)
+#define MCR_RRF BIT(9)
+#define MSR_TDF BIT(0)
+#define MSR_RDF BIT(1)
+#define MSR_SDF BIT(9)
+#define MSR_NDF BIT(10)
+#define MSR_ALF BIT(11)
+#define MSR_MBF BIT(24)
+#define MSR_BBF BIT(25)
+#define MIER_TDIE BIT(0)
+#define MIER_RDIE BIT(1)
+#define MIER_SDIE BIT(9)
+#define MIER_NDIE BIT(10)
+#define MCFGR1_AUTOSTOP BIT(8)
+#define MCFGR1_IGNACK BIT(9)
+#define MRDR_RXEMPTY BIT(14)
+
+#define I2C_CLK_RATIO 2
+#define CHUNK_DATA 256
+
+#define LPI2C_DEFAULT_RATE 100000
+#define STARDARD_MAX_BITRATE 400000
+#define FAST_MAX_BITRATE 1000000
+#define FAST_PLUS_MAX_BITRATE 3400000
+#define HIGHSPEED_MAX_BITRATE 5000000
+
+enum lpi2c_imx_mode {
+ STANDARD, /* 100+Kbps */
+ FAST, /* 400+Kbps */
+ FAST_PLUS, /* 1.0+Mbps */
+ HS, /* 3.4+Mbps */
+ ULTRA_FAST, /* 5.0+Mbps */
+};
+
+enum lpi2c_imx_pincfg {
+ TWO_PIN_OD,
+ TWO_PIN_OO,
+ TWO_PIN_PP,
+ FOUR_PIN_PP,
+};
+
+struct lpi2c_imx_struct {
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ void __iomem *base;
+ __u8 *rx_buf;
+ __u8 *tx_buf;
+ struct completion complete;
+ unsigned int msglen;
+ unsigned int delivered;
+ unsigned int block_data;
+ unsigned int bitrate;
+ unsigned int txfifosize;
+ unsigned int rxfifosize;
+ enum lpi2c_imx_mode mode;
+};
+
+static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
+ unsigned int enable)
+{
+ writel(enable, lpi2c_imx->base + LPI2C_MIER);
+}
+
+static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ unsigned int temp;
+
+ while (1) {
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+ /* check for arbitration lost, clear if set */
+ if (temp & MSR_ALF) {
+ writel(temp, lpi2c_imx->base + LPI2C_MSR);
+ return -EAGAIN;
+ }
+
+ if (temp & (MSR_BBF | MSR_MBF))
+ break;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+			dev_dbg(&lpi2c_imx->adapter.dev, "bus not working\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int bitrate = lpi2c_imx->bitrate;
+ enum lpi2c_imx_mode mode;
+
+ if (bitrate < STARDARD_MAX_BITRATE)
+ mode = STANDARD;
+ else if (bitrate < FAST_MAX_BITRATE)
+ mode = FAST;
+ else if (bitrate < FAST_PLUS_MAX_BITRATE)
+ mode = FAST_PLUS;
+ else if (bitrate < HIGHSPEED_MAX_BITRATE)
+ mode = HS;
+ else
+ mode = ULTRA_FAST;
+
+ lpi2c_imx->mode = mode;
+}
+
+static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ unsigned int temp;
+ u8 read;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp |= MCR_RRF | MCR_RTF;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+ writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);
+
+ read = msgs->flags & I2C_M_RD;
+ temp = (msgs->addr << 1 | read) | (GEN_START << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+ return lpi2c_imx_bus_busy(lpi2c_imx);
+}
+
+static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ unsigned int temp;
+
+ writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);
+
+ do {
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ if (temp & MSR_SDF)
+ break;
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
+ break;
+ }
+ schedule();
+
+ } while (1);
+}
+
+/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
+static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u8 prescale, filt, sethold, clkhi, clklo, datavd;
+ unsigned int clk_rate, clk_cycle;
+ enum lpi2c_imx_pincfg pincfg;
+ unsigned int temp;
+
+ lpi2c_imx_set_mode(lpi2c_imx);
+
+ clk_rate = clk_get_rate(lpi2c_imx->clk);
+ if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
+ filt = 0;
+ else
+ filt = 2;
+
+ for (prescale = 0; prescale <= 7; prescale++) {
+ clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
+ - 3 - (filt >> 1);
+ clkhi = (clk_cycle + I2C_CLK_RATIO) / (I2C_CLK_RATIO + 1);
+ clklo = clk_cycle - clkhi;
+ if (clklo < 64)
+ break;
+ }
+
+ if (prescale > 7)
+ return -EINVAL;
+
+ /* set MCFGR1: PINCFG, PRESCALE, IGNACK */
+ if (lpi2c_imx->mode == ULTRA_FAST)
+ pincfg = TWO_PIN_OO;
+ else
+ pincfg = TWO_PIN_OD;
+ temp = prescale | pincfg << 24;
+
+ if (lpi2c_imx->mode == ULTRA_FAST)
+ temp |= MCFGR1_IGNACK;
+
+ writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);
+
+ /* set MCFGR2: FILTSDA, FILTSCL */
+ temp = (filt << 16) | (filt << 24);
+ writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);
+
+ /* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
+ sethold = clkhi;
+ datavd = clkhi >> 1;
+ temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;
+
+ if (lpi2c_imx->mode == HS)
+ writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
+ else
+ writel(temp, lpi2c_imx->base + LPI2C_MCCR0);
+
+ return 0;
+}
+
+static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int temp;
+ int ret;
+
+ ret = clk_enable(lpi2c_imx->clk);
+ if (ret)
+ return ret;
+
+ temp = MCR_RST;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+ writel(0, lpi2c_imx->base + LPI2C_MCR);
+
+ ret = lpi2c_imx_config(lpi2c_imx);
+ if (ret)
+ goto clk_disable;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp |= MCR_MEN;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+ return 0;
+
+clk_disable:
+ clk_disable(lpi2c_imx->clk);
+
+ return ret;
+}
+
+static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u32 temp;
+
+ temp = readl(lpi2c_imx->base + LPI2C_MCR);
+ temp &= ~MCR_MEN;
+ writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+ clk_disable(lpi2c_imx->clk);
+
+ return 0;
+}
+
+static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long orig_jiffies = jiffies;
+ u32 txcnt;
+
+ do {
+ txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+ if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
+ return -EIO;
+ }
+
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+ dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+
+ } while (txcnt);
+
+ return 0;
+}
+
+static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int temp, remaining;
+
+ remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+ if (remaining > (lpi2c_imx->rxfifosize >> 1))
+ temp = lpi2c_imx->rxfifosize >> 1;
+ else
+ temp = 0;
+
+ writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int data, txcnt;
+
+ txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+ while (txcnt < lpi2c_imx->txfifosize) {
+ if (lpi2c_imx->delivered == lpi2c_imx->msglen)
+ break;
+
+ data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
+ writel(data, lpi2c_imx->base + LPI2C_MTDR);
+ txcnt++;
+ }
+
+ if (lpi2c_imx->delivered < lpi2c_imx->msglen)
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
+ else
+ complete(&lpi2c_imx->complete);
+}
+
+static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int blocklen, remaining;
+ unsigned int temp, data;
+
+ do {
+ data = readl(lpi2c_imx->base + LPI2C_MRDR);
+ if (data & MRDR_RXEMPTY)
+ break;
+
+ lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
+ } while (1);
+
+ /*
+	 * The first byte is the length of the remaining packet in an SMBus
+	 * block data read. Add it to the message length.
+ */
+ if (lpi2c_imx->block_data) {
+ blocklen = lpi2c_imx->rx_buf[0];
+ lpi2c_imx->msglen += blocklen;
+ }
+
+ remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+ if (!remaining) {
+ complete(&lpi2c_imx->complete);
+ return;
+ }
+
+ /* not finished, still waiting for rx data */
+ lpi2c_imx_set_rx_watermark(lpi2c_imx);
+
+ /* multiple receive commands */
+ if (lpi2c_imx->block_data) {
+ lpi2c_imx->block_data = 0;
+ temp = remaining;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+ } else if (!(lpi2c_imx->delivered & 0xff)) {
+ temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+ }
+
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);
+}
+
+static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ lpi2c_imx->tx_buf = msgs->buf;
+ lpi2c_imx_set_tx_watermark(lpi2c_imx);
+ lpi2c_imx_write_txfifo(lpi2c_imx);
+}
+
+static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msgs)
+{
+ unsigned int temp;
+
+ lpi2c_imx->rx_buf = msgs->buf;
+ lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;
+
+ lpi2c_imx_set_rx_watermark(lpi2c_imx);
+ temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
+ temp |= (RECV_DATA << 8);
+ writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
+}
+
+static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
+ unsigned int temp;
+ int i, result;
+
+ result = lpi2c_imx_master_enable(lpi2c_imx);
+ if (result)
+ return result;
+
+ for (i = 0; i < num; i++) {
+ result = lpi2c_imx_start(lpi2c_imx, &msgs[i]);
+ if (result)
+ goto disable;
+
+ /* quick smbus */
+ if (num == 1 && msgs[0].len == 0)
+ goto stop;
+
+ lpi2c_imx->delivered = 0;
+ lpi2c_imx->msglen = msgs[i].len;
+ init_completion(&lpi2c_imx->complete);
+
+ if (msgs[i].flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, &msgs[i]);
+ else
+ lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+
+ result = lpi2c_imx_msg_complete(lpi2c_imx);
+ if (result)
+ goto stop;
+
+ if (!(msgs[i].flags & I2C_M_RD)) {
+ result = lpi2c_imx_txfifo_empty(lpi2c_imx);
+ if (result)
+ goto stop;
+ }
+ }
+
+stop:
+ lpi2c_imx_stop(lpi2c_imx);
+
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ if ((temp & MSR_NDF) && !result)
+ result = -EIO;
+
+disable:
+ lpi2c_imx_master_disable(lpi2c_imx);
+
+ dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
+ (result < 0) ? "error" : "success msg",
+ (result < 0) ? result : num);
+
+ return (result < 0) ? result : num;
+}
+
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int temp;
+
+ lpi2c_imx_intctrl(lpi2c_imx, 0);
+ temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+ if (temp & MSR_RDF)
+ lpi2c_imx_read_rxfifo(lpi2c_imx);
+
+ if (temp & MSR_TDF)
+ lpi2c_imx_write_txfifo(lpi2c_imx);
+
+ if (temp & MSR_NDF)
+ complete(&lpi2c_imx->complete);
+
+ return IRQ_HANDLED;
+}
+
+static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+}
+
+static struct i2c_algorithm lpi2c_imx_algo = {
+ .master_xfer = lpi2c_imx_xfer,
+ .functionality = lpi2c_imx_func,
+};
+
+static const struct of_device_id lpi2c_imx_of_match[] = {
+ { .compatible = "fsl,imx7ulp-lpi2c" },
+ { .compatible = "fsl,imx8dv-lpi2c" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
+
+static int lpi2c_imx_probe(struct platform_device *pdev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx;
+ struct resource *res;
+ unsigned int temp;
+ int irq, ret;
+
+ lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
+ if (!lpi2c_imx)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpi2c_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lpi2c_imx->base))
+ return PTR_ERR(lpi2c_imx->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ return irq;
+ }
+
+ lpi2c_imx->adapter.owner = THIS_MODULE;
+ lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
+ lpi2c_imx->adapter.dev.parent = &pdev->dev;
+ lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
+ strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ sizeof(lpi2c_imx->adapter.name));
+
+ lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lpi2c_imx->clk)) {
+ dev_err(&pdev->dev, "can't get I2C peripheral clock\n");
+ return PTR_ERR(lpi2c_imx->clk);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &lpi2c_imx->bitrate);
+ if (ret)
+ lpi2c_imx->bitrate = LPI2C_DEFAULT_RATE;
+
+ ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+ pdev->name, lpi2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+ return ret;
+ }
+
+ i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
+ platform_set_drvdata(pdev, lpi2c_imx);
+
+ ret = clk_prepare_enable(lpi2c_imx->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed %d\n", ret);
+ return ret;
+ }
+
+ temp = readl(lpi2c_imx->base + LPI2C_PARAM);
+ lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
+ lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+ clk_disable(lpi2c_imx->clk);
+
+ ret = i2c_add_adapter(&lpi2c_imx->adapter);
+ if (ret)
+ goto clk_unprepare;
+
+ dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");
+
+ return 0;
+
+clk_unprepare:
+ clk_unprepare(lpi2c_imx->clk);
+
+ return ret;
+}
+
+static int lpi2c_imx_remove(struct platform_device *pdev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&lpi2c_imx->adapter);
+
+ clk_unprepare(lpi2c_imx->clk);
+
+ return 0;
+}
+
+static struct platform_driver lpi2c_imx_driver = {
+ .probe = lpi2c_imx_probe,
+ .remove = lpi2c_imx_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = lpi2c_imx_of_match,
+ },
+};
+
+module_platform_driver(lpi2c_imx_driver);
+
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
+MODULE_LICENSE("GPL");
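lpi2c_imx_config() above derives the CLKHI/CLKLO dividers from the functional clock and the requested bit rate, keeping CLKLO roughly I2C_CLK_RATIO times CLKHI and raising the prescaler until CLKLO fits the register field. The following sketch reproduces just that search outside the driver; lpi2c_dividers() and the example clock rate are assumptions for illustration.

#include <stdio.h>

#define I2C_CLK_RATIO 2

/* Return 0 and fill prescale/clkhi/clklo on success, -1 if no prescaler
 * in 0..7 yields a CLKLO below 64. */
static int lpi2c_dividers(unsigned int clk_rate, unsigned int bitrate,
			  unsigned int filt, unsigned int *prescale,
			  unsigned int *clkhi, unsigned int *clklo)
{
	unsigned int p, cycle, hi, lo;

	for (p = 0; p <= 7; p++) {
		cycle = clk_rate / ((1u << p) * bitrate) - 3 - (filt >> 1);
		hi = (cycle + I2C_CLK_RATIO) / (I2C_CLK_RATIO + 1);
		lo = cycle - hi;
		if (lo < 64) {
			*prescale = p;
			*clkhi = hi;
			*clklo = lo;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int p, hi, lo;

	/* 24 MHz functional clock, 100 kHz bus, filter = 2 (standard mode) */
	if (!lpi2c_dividers(24000000, 100000, 2, &p, &hi, &lo))
		printf("prescale=%u clkhi=%u clklo=%u\n", p, hi, lo);
	return 0;
}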
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
new file mode 100644
index 000000000000..d271e6a0954c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* General defines */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR 0x2000
+#define MLXCPLD_I2C_DEVICE_NAME "i2c_mlxcpld"
+#define MLXCPLD_I2C_VALID_FLAG (I2C_M_RECV_LEN | I2C_M_RD)
+#define MLXCPLD_I2C_BUS_NUM 1
+#define MLXCPLD_I2C_DATA_REG_SZ 36
+#define MLXCPLD_I2C_MAX_ADDR_LEN 4
+#define MLXCPLD_I2C_RETR_NUM 2
+#define MLXCPLD_I2C_XFER_TO 500000 /* usec */
+#define MLXCPLD_I2C_POLL_TIME 2000 /* usec */
+
+/* LPC I2C registers */
+#define MLXCPLD_LPCI2C_LPF_REG 0x0
+#define MLXCPLD_LPCI2C_CTRL_REG 0x1
+#define MLXCPLD_LPCI2C_HALF_CYC_REG 0x4
+#define MLXCPLD_LPCI2C_I2C_HOLD_REG 0x5
+#define MLXCPLD_LPCI2C_CMD_REG 0x6
+#define MLXCPLD_LPCI2C_NUM_DAT_REG 0x7
+#define MLXCPLD_LPCI2C_NUM_ADDR_REG 0x8
+#define MLXCPLD_LPCI2C_STATUS_REG 0x9
+#define MLXCPLD_LPCI2C_DATA_REG 0xa
+
+/* LPC I2C masks and parameters */
+#define MLXCPLD_LPCI2C_RST_SEL_MASK 0x1
+#define MLXCPLD_LPCI2C_TRANS_END 0x1
+#define MLXCPLD_LPCI2C_STATUS_NACK 0x10
+#define MLXCPLD_LPCI2C_NO_IND 0
+#define MLXCPLD_LPCI2C_ACK_IND 1
+#define MLXCPLD_LPCI2C_NACK_IND 2
+
+struct mlxcpld_i2c_curr_xfer {
+ u8 cmd;
+ u8 addr_width;
+ u8 data_len;
+ u8 msg_num;
+ struct i2c_msg *msg;
+};
+
+struct mlxcpld_i2c_priv {
+ struct i2c_adapter adap;
+ u32 base_addr;
+ struct mutex lock;
+ struct mlxcpld_i2c_curr_xfer xfer;
+ struct device *dev;
+};
+
+static void mlxcpld_i2c_lpc_write_buf(u8 *data, u8 len, u32 addr)
+{
+ int i;
+
+ for (i = 0; i < len - len % 4; i += 4)
+ outl(*(u32 *)(data + i), addr + i);
+ for (; i < len; ++i)
+ outb(*(data + i), addr + i);
+}
+
+static void mlxcpld_i2c_lpc_read_buf(u8 *data, u8 len, u32 addr)
+{
+ int i;
+
+ for (i = 0; i < len - len % 4; i += 4)
+ *(u32 *)(data + i) = inl(addr + i);
+ for (; i < len; ++i)
+ *(data + i) = inb(addr + i);
+}
+
+static void mlxcpld_i2c_read_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+ u8 *data, u8 datalen)
+{
+ u32 addr = priv->base_addr + offs;
+
+ switch (datalen) {
+ case 1:
+ *(data) = inb(addr);
+ break;
+ case 2:
+ *((u16 *)data) = inw(addr);
+ break;
+ case 3:
+ *((u16 *)data) = inw(addr);
+ *(data + 2) = inb(addr + 2);
+ break;
+ case 4:
+ *((u32 *)data) = inl(addr);
+ break;
+ default:
+ mlxcpld_i2c_lpc_read_buf(data, datalen, addr);
+ break;
+ }
+}
+
+static void mlxcpld_i2c_write_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+ u8 *data, u8 datalen)
+{
+ u32 addr = priv->base_addr + offs;
+
+ switch (datalen) {
+ case 1:
+ outb(*(data), addr);
+ break;
+ case 2:
+ outw(*((u16 *)data), addr);
+ break;
+ case 3:
+ outw(*((u16 *)data), addr);
+ outb(*(data + 2), addr + 2);
+ break;
+ case 4:
+ outl(*((u32 *)data), addr);
+ break;
+ default:
+ mlxcpld_i2c_lpc_write_buf(data, datalen, addr);
+ break;
+ }
+}
+
+/*
+ * Check the validity of the received i2c message parameters.
+ * Returns 0 if OK, a negative errno otherwise.
+ */
+static int mlxcpld_i2c_check_msg_params(struct mlxcpld_i2c_priv *priv,
+ struct i2c_msg *msgs, int num)
+{
+ int i;
+
+ if (!num) {
+ dev_err(priv->dev, "Incorrect 0 num of messages\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(msgs[0].addr > 0x7f)) {
+ dev_err(priv->dev, "Invalid address 0x%03x\n",
+ msgs[0].addr);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; ++i) {
+ if (unlikely(!msgs[i].buf)) {
+ dev_err(priv->dev, "Invalid buf in msg[%d]\n",
+ i);
+ return -EINVAL;
+ }
+ if (unlikely(msgs[0].addr != msgs[i].addr)) {
+ dev_err(priv->dev, "Invalid addr in msg[%d]\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check whether the transfer is complete and report the operation status.
+ * Returns 0 if the transfer completed (either ACK or NACK),
+ * negative if the transfer has not finished yet.
+ */
+static int mlxcpld_i2c_check_status(struct mlxcpld_i2c_priv *priv, int *status)
+{
+ u8 val;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+ if (val & MLXCPLD_LPCI2C_TRANS_END) {
+ if (val & MLXCPLD_LPCI2C_STATUS_NACK)
+ /*
+ * The slave is unable to accept the data. No such
+ * slave, command not understood, or unable to accept
+ * any more data.
+ */
+ *status = MLXCPLD_LPCI2C_NACK_IND;
+ else
+ *status = MLXCPLD_LPCI2C_ACK_IND;
+ return 0;
+ }
+ *status = MLXCPLD_LPCI2C_NO_IND;
+
+ return -EIO;
+}
+
+static void mlxcpld_i2c_set_transf_data(struct mlxcpld_i2c_priv *priv,
+ struct i2c_msg *msgs, int num,
+ u8 comm_len)
+{
+ priv->xfer.msg = msgs;
+ priv->xfer.msg_num = num;
+
+ /*
+	 * No upper layer currently uses a transfer with more than
+	 * 2 messages. It is also of little relevance on Mellanox systems
+	 * because of a HW limitation: the maximum transfer size is
+	 * 32 bytes in the current x86 LPCI2C bridge.
+ */
+ priv->xfer.cmd = msgs[num - 1].flags & I2C_M_RD;
+
+ if (priv->xfer.cmd == I2C_M_RD && comm_len != msgs[0].len) {
+ priv->xfer.addr_width = msgs[0].len;
+ priv->xfer.data_len = comm_len - priv->xfer.addr_width;
+ } else {
+ priv->xfer.addr_width = 0;
+ priv->xfer.data_len = comm_len;
+ }
+}
+
+/* Reset CPLD LPCI2C block */
+static void mlxcpld_i2c_reset(struct mlxcpld_i2c_priv *priv)
+{
+ u8 val;
+
+ mutex_lock(&priv->lock);
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+ val &= ~MLXCPLD_LPCI2C_RST_SEL_MASK;
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+
+ mutex_unlock(&priv->lock);
+}
+
+/* Make sure the CPLD is ready to start transmitting. */
+static int mlxcpld_i2c_check_busy(struct mlxcpld_i2c_priv *priv)
+{
+ u8 val;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+ if (val & MLXCPLD_LPCI2C_TRANS_END)
+ return 0;
+
+ return -EIO;
+}
+
+static int mlxcpld_i2c_wait_for_free(struct mlxcpld_i2c_priv *priv)
+{
+ int timeout = 0;
+
+ do {
+ if (!mlxcpld_i2c_check_busy(priv))
+ break;
+ usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+ timeout += MLXCPLD_I2C_POLL_TIME;
+ } while (timeout <= MLXCPLD_I2C_XFER_TO);
+
+ if (timeout > MLXCPLD_I2C_XFER_TO)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/*
+ * Wait for the master transfer to complete.
+ * It puts the current process to sleep until we get an interrupt or the timeout expires.
+ * Returns the number of transferred or read bytes, or an error (<0).
+ */
+static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
+{
+ int status, i, timeout = 0;
+ u8 datalen;
+
+ do {
+ usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+ if (!mlxcpld_i2c_check_status(priv, &status))
+ break;
+ timeout += MLXCPLD_I2C_POLL_TIME;
+ } while (status == 0 && timeout < MLXCPLD_I2C_XFER_TO);
+
+ switch (status) {
+ case MLXCPLD_LPCI2C_NO_IND:
+ return -ETIMEDOUT;
+
+ case MLXCPLD_LPCI2C_ACK_IND:
+ if (priv->xfer.cmd != I2C_M_RD)
+ return (priv->xfer.addr_width + priv->xfer.data_len);
+
+ if (priv->xfer.msg_num == 1)
+ i = 0;
+ else
+ i = 1;
+
+ if (!priv->xfer.msg[i].buf)
+ return -EINVAL;
+
+ /*
+		 * The actual read data length will always be the same as
+		 * the requested length. 0xff (line pull-up) is returned
+		 * if the slave has no data to return. Thus don't read the
+		 * MLXCPLD_LPCI2C_NUM_DAT_REG register from the CPLD.
+ */
+ datalen = priv->xfer.data_len;
+
+ mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_DATA_REG,
+ priv->xfer.msg[i].buf, datalen);
+
+ return datalen;
+
+ case MLXCPLD_LPCI2C_NACK_IND:
+ return -ENXIO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mlxcpld_i2c_xfer_msg(struct mlxcpld_i2c_priv *priv)
+{
+ int i, len = 0;
+ u8 cmd;
+
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
+ &priv->xfer.data_len, 1);
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_ADDR_REG,
+ &priv->xfer.addr_width, 1);
+
+ for (i = 0; i < priv->xfer.msg_num; i++) {
+ if ((priv->xfer.msg[i].flags & I2C_M_RD) != I2C_M_RD) {
+ /* Don't write to CPLD buffer in read transaction */
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_DATA_REG +
+ len, priv->xfer.msg[i].buf,
+ priv->xfer.msg[i].len);
+ len += priv->xfer.msg[i].len;
+ }
+ }
+
+ /*
+	 * Set the target slave address with the command for the master
+	 * transfer. It must be the last write before the CPLD transaction.
+ */
+ cmd = (priv->xfer.msg[0].addr << 1) | priv->xfer.cmd;
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CMD_REG, &cmd, 1);
+}
+
+/*
+ * Generic lpc-i2c transfer.
+ * Returns the number of processed messages or error (<0).
+ */
+static int mlxcpld_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct mlxcpld_i2c_priv *priv = i2c_get_adapdata(adap);
+ u8 comm_len = 0;
+ int i, err;
+
+ err = mlxcpld_i2c_check_msg_params(priv, msgs, num);
+ if (err) {
+ dev_err(priv->dev, "Incorrect message\n");
+ return err;
+ }
+
+ for (i = 0; i < num; ++i)
+ comm_len += msgs[i].len;
+
+ /* Check bus state */
+ if (mlxcpld_i2c_wait_for_free(priv)) {
+ dev_err(priv->dev, "LPCI2C bridge is busy\n");
+
+ /*
+		 * Usually this means something serious has happened.
+		 * We cannot have an unfinished previous transfer,
+		 * so it makes no sense to try to stop it.
+		 * Most likely we were unable to recover from the
+		 * previous error.
+		 * The only reasonable option is a soft reset.
+ */
+ mlxcpld_i2c_reset(priv);
+ if (mlxcpld_i2c_check_busy(priv)) {
+ dev_err(priv->dev, "LPCI2C bridge is busy after reset\n");
+ return -EIO;
+ }
+ }
+
+ mlxcpld_i2c_set_transf_data(priv, msgs, num, comm_len);
+
+ mutex_lock(&priv->lock);
+
+ /* Do real transfer. Can't fail */
+ mlxcpld_i2c_xfer_msg(priv);
+
+ /* Wait for transaction complete */
+ err = mlxcpld_i2c_wait_for_tc(priv);
+
+ mutex_unlock(&priv->lock);
+
+ return err < 0 ? err : num;
+}
+
+static u32 mlxcpld_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm mlxcpld_i2c_algo = {
+ .master_xfer = mlxcpld_i2c_xfer,
+ .functionality = mlxcpld_i2c_func
+};
+
+static struct i2c_adapter_quirks mlxcpld_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_read_len = MLXCPLD_I2C_DATA_REG_SZ - MLXCPLD_I2C_MAX_ADDR_LEN,
+ .max_write_len = MLXCPLD_I2C_DATA_REG_SZ,
+ .max_comb_1st_msg_len = 4,
+};
+
+static struct i2c_adapter mlxcpld_i2c_adapter = {
+ .owner = THIS_MODULE,
+ .name = "i2c-mlxcpld",
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .algo = &mlxcpld_i2c_algo,
+ .quirks = &mlxcpld_i2c_quirks,
+ .retries = MLXCPLD_I2C_RETR_NUM,
+ .nr = MLXCPLD_I2C_BUS_NUM,
+};
+
+static int mlxcpld_i2c_probe(struct platform_device *pdev)
+{
+ struct mlxcpld_i2c_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->dev = &pdev->dev;
+
+ /* Register with i2c layer */
+ mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
+ priv->adap = mlxcpld_i2c_adapter;
+ priv->adap.dev.parent = &pdev->dev;
+ priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
+ i2c_set_adapdata(&priv->adap, priv);
+
+ err = i2c_add_numbered_adapter(&priv->adap);
+ if (err)
+ mutex_destroy(&priv->lock);
+
+ return err;
+}
+
+static int mlxcpld_i2c_remove(struct platform_device *pdev)
+{
+ struct mlxcpld_i2c_priv *priv = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&priv->adap);
+ mutex_destroy(&priv->lock);
+
+ return 0;
+}
+
+static struct platform_driver mlxcpld_i2c_driver = {
+ .probe = mlxcpld_i2c_probe,
+ .remove = mlxcpld_i2c_remove,
+ .driver = {
+ .name = MLXCPLD_I2C_DEVICE_NAME,
+ },
+};
+
+module_platform_driver(mlxcpld_i2c_driver);
+
+MODULE_AUTHOR("Michael Shych <michaels@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mlxcpld");
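The mlxcpld driver has no interrupt line, so it polls the CPLD status with usleep_range() while accumulating the elapsed time against an overall transfer timeout. A tiny sketch of that accumulate-and-poll loop with a stubbed idle check follows; controller_idle() and wait_for_free() are hypothetical names.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_TIME_US	2000	/* per-iteration sleep, as in the driver */
#define XFER_TO_US	500000	/* overall transfer timeout */

/* Stub: pretend the controller becomes idle after a few polls. */
static bool controller_idle(void)
{
	static int calls;
	return ++calls >= 3;
}

static int wait_for_free(void)
{
	int timeout = 0;

	do {
		if (controller_idle())
			return 0;
		usleep(POLL_TIME_US);	/* the driver uses usleep_range() */
		timeout += POLL_TIME_US;
	} while (timeout <= XFER_TO_US);

	return -1;			/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("wait_for_free() = %d\n", wait_for_free());
	return 0;
}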
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 5e63b17f935d..1d8775799056 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -36,24 +36,6 @@ static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
}
-static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- if (octeon_i2c_test_iflg(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but IFLG hasn't changed.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c);
-}
-
/**
* octeon_i2c_wait - wait for the IFLG to be set
* @i2c: The struct octeon_i2c
@@ -63,7 +45,6 @@ static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
long time_left;
- bool first = true;
/*
* Some chip revisions don't assert the irq in the interrupt
@@ -80,7 +61,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
}
i2c->int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
i2c->adap.timeout);
i2c->int_disable(i2c);
@@ -102,25 +83,6 @@ static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
}
-static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- /* check if valid bit is cleared */
- if (octeon_i2c_hlc_test_valid(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but valid bit isn't cleared.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_hlc_test_valid(i2c);
-}
-
static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
{
/* clear ST/TS events, listen for neither */
@@ -176,7 +138,6 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
*/
static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
- bool first = true;
int time_left;
/*
@@ -195,7 +156,7 @@ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
i2c->hlc_int_enable(i2c);
time_left = wait_event_timeout(i2c->queue,
- octeon_i2c_hlc_test_ready(i2c, &first),
+ octeon_i2c_hlc_test_valid(i2c),
i2c->adap.timeout);
i2c->hlc_int_disable(i2c);
if (!time_left)
@@ -381,7 +342,9 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- data[i] = octeon_i2c_data_read(i2c);
+ data[i] = octeon_i2c_data_read(i2c, &result);
+ if (result)
+ return result;
if (recv_len && i == 0) {
if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
@@ -789,6 +752,9 @@ static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+ /* wait for software reset to settle */
+ udelay(5);
/*
* Bring control register to a good state regardless
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 87151ea74acd..e160f838c254 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -141,11 +141,14 @@ static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
*/
static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0)
+ return;
} while ((tmp & SW_TWSI_V) != 0);
}
@@ -163,24 +166,32 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+ int *error)
{
+ int tries = 1000;
u64 tmp;
__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
do {
tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ if (--tries < 0) {
+ /* signal that the returned data is invalid */
+ if (error)
+ *error = -EIO;
+ return 0;
+ }
} while ((tmp & SW_TWSI_V) != 0);
return tmp & 0xFF;
}
#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
/**
* octeon_i2c_read_int - read the TWSI_INT register
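The core.h change above bounds the busy-wait on the SW_TWSI valid bit to 1000 polls and reports exhaustion through an optional error pointer instead of spinning forever. A minimal user-space sketch of that pattern against a stubbed CSR follows; csr_read(), reg_read() and the stub behaviour are assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

#define VALID_BIT (1ull << 63)

static uint64_t fake_csr = VALID_BIT;	/* stands in for the SW_TWSI CSR */
static int polls_until_clear = 5;	/* stub: valid bit clears after 5 reads */

static uint64_t csr_read(void)
{
	if (--polls_until_clear <= 0)
		fake_csr &= ~VALID_BIT;
	return fake_csr | 0x42;		/* low byte carries the data */
}

/* Poll until the valid bit clears, at most 1000 times; on exhaustion
 * flag the failure through *error and return 0. */
static uint8_t reg_read(int *error)
{
	int tries = 1000;
	uint64_t tmp;

	do {
		tmp = csr_read();
		if (--tries < 0) {
			if (error)
				*error = -5;	/* -EIO in the driver */
			return 0;
		}
	} while (tmp & VALID_BIT);

	return tmp & 0xff;
}

int main(void)
{
	int err = 0;

	printf("data=0x%02x err=%d\n", reg_read(&err), err);
	return 0;
}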
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c2268cdf38e8..e34d82e79b98 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 command, int size, union i2c_smbus_data *data)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = adapdata->smba;
+ int retries = MAX_TIMEOUT;
+ int smbslvcnt;
u8 smba_en_lo;
u8 port;
int retval;
+ /* Request the SMBUS semaphore, avoid conflicts with the IMC */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ do {
+ outb_p(smbslvcnt | 0x10, SMBSLVCNT);
+
+ /* Check the semaphore status */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ if (smbslvcnt & 0x10)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--retries);
+ /* SMBus is still owned by the IMC, we give up */
+ if (!retries)
+ return -EBUSY;
+
mutex_lock(&piix4_mutex_sb800);
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
@@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
mutex_unlock(&piix4_mutex_sb800);
+ /* Release the semaphore */
+ outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+
return retval;
}
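On SB800-class chipsets the SMBus host is shared with the IMC, so the piix4 hunk above first claims a hardware semaphore through SMBSLVCNT (request bit 0x10, release bit 0x20) and only then selects the port and runs the transaction. A compact sketch of the acquire/release handshake with mocked port I/O follows; port_read()/port_write() and the grant behaviour are assumptions.

#include <stdint.h>
#include <stdio.h>

#define SEM_REQ	0x10u	/* request/ownership bit */
#define SEM_REL	0x20u	/* release bit */
#define MAX_RETRIES 400

static uint8_t smbslvcnt_port;	/* stands in for the SMBSLVCNT I/O port */

static void port_write(uint8_t v) { smbslvcnt_port = v; }	/* stub outb_p() */
static uint8_t port_read(void) { return smbslvcnt_port; }	/* stub inb_p() */

static int semaphore_acquire(void)
{
	int retries = MAX_RETRIES;
	uint8_t val = port_read();

	do {
		port_write(val | SEM_REQ);	/* ask the IMC for the bus */
		val = port_read();
		if (val & SEM_REQ)		/* granted: bit reads back set */
			return 0;
		/* the driver sleeps 1-2 ms here before retrying */
	} while (--retries);

	return -1;				/* -EBUSY: IMC kept the bus */
}

static void semaphore_release(uint8_t val)
{
	port_write(val | SEM_REL);
}

int main(void)
{
	if (!semaphore_acquire()) {
		/* ...perform the SMBus access here... */
		semaphore_release(port_read());
		puts("semaphore released");
	}
	return 0;
}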
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 417464e9ea2a..004deb96afe3 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -1,9 +1,13 @@
/*
+ * CE4100 PCI-I2C glue code for PXA's driver
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * License: GPL v2
+ *
* The CE4100's I2C device is more or less the same one as found on PXA.
 * It does not support slave mode and the registers are slightly moved. This
 * PCI device provides three BARs, each containing a single I2C controller.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/i2c/pxa-i2c.h>
@@ -134,35 +138,17 @@ err_mem:
return ret;
}
-static void ce4100_i2c_remove(struct pci_dev *dev)
-{
- struct ce4100_devices *sds;
- unsigned int i;
-
- sds = pci_get_drvdata(dev);
-
- for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
- platform_device_unregister(sds->pdev[i]);
-
- pci_disable_device(dev);
- kfree(sds);
-}
-
static const struct pci_device_id ce4100_i2c_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
{ },
};
-MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
static struct pci_driver ce4100_i2c_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = "ce4100_i2c",
.id_table = ce4100_i2c_devices,
.probe = ce4100_i2c_probe,
- .remove = ce4100_i2c_remove,
};
-
-module_pci_driver(ce4100_i2c_driver);
-
-MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+builtin_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index e28b825b0433..6cf333ecc8b8 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -48,6 +48,8 @@ struct pxa_reg_layout {
u32 isar;
u32 ilcr;
u32 iwcr;
+ u32 fm;
+ u32 hs;
};
enum pxa_i2c_types {
@@ -55,8 +57,12 @@ enum pxa_i2c_types {
REGS_PXA3XX,
REGS_CE4100,
REGS_PXA910,
+ REGS_A3700,
};
+#define ICR_BUSMODE_FM (1 << 16) /* shifted fast mode for armada-3700 */
+#define ICR_BUSMODE_HS (1 << 17) /* shifted high speed mode for armada-3700 */
+
/*
* I2C registers definitions
*/
@@ -91,6 +97,15 @@ static struct pxa_reg_layout pxa_reg_layout[] = {
.ilcr = 0x28,
.iwcr = 0x30,
},
+ [REGS_A3700] = {
+ .ibmr = 0x00,
+ .idbr = 0x04,
+ .icr = 0x08,
+ .isr = 0x0c,
+ .isar = 0x10,
+ .fm = ICR_BUSMODE_FM,
+ .hs = ICR_BUSMODE_HS,
+ },
};
static const struct platform_device_id i2c_pxa_id_table[] = {
@@ -98,6 +113,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = {
{ "pxa3xx-pwri2c", REGS_PXA3XX },
{ "ce4100-i2c", REGS_CE4100 },
{ "pxa910-i2c", REGS_PXA910 },
+ { "armada-3700-i2c", REGS_A3700 },
{ },
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
@@ -193,6 +209,8 @@ struct pxa_i2c {
unsigned char master_code;
unsigned long rate;
bool highmode_enter;
+ u32 fm_mask;
+ u32 hs_mask;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@@ -503,8 +521,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
writel(i2c->slave_addr, _ISAR(i2c));
/* set control register values */
- writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
- writel(readl(_ICR(i2c)) | (i2c->high_mode ? ICR_HS : 0), _ICR(i2c));
+ writel(I2C_ICR_INIT | (i2c->fast_mode ? i2c->fm_mask : 0), _ICR(i2c));
+ writel(readl(_ICR(i2c)) | (i2c->high_mode ? i2c->hs_mask : 0), _ICR(i2c));
#ifdef CONFIG_I2C_PXA_SLAVE
dev_info(&i2c->adap.dev, "Enabling slave mode\n");
@@ -1137,6 +1155,7 @@ static const struct of_device_id i2c_pxa_dt_ids[] = {
{ .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX },
{ .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX },
{ .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 },
+ { .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 },
{}
};
MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
@@ -1234,6 +1253,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+ i2c->fm_mask = pxa_reg_layout[i2c_type].fm ? : ICR_FM;
+ i2c->hs_mask = pxa_reg_layout[i2c_type].hs ? : ICR_HS;
+
if (i2c_type != REGS_CE4100)
i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index a8497cfdae6f..1902d8ac9753 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -132,6 +133,10 @@
/* Max timeout in ms for 32k bytes */
#define TOUT_MAX 300
+/* Default values. Use these if FW query fails */
+#define DEFAULT_CLK_FREQ 100000
+#define DEFAULT_SRC_CLK 20000000
+
struct qup_i2c_block {
int count;
int pos;
@@ -525,6 +530,33 @@ static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
return data_len;
}
+static bool qup_i2c_check_msg_len(struct i2c_msg *msg)
+{
+ return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN));
+}
+
+static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ int len = 0;
+
+ if (msg->len > 1) {
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ tags[len++] = qup_i2c_get_data_len(qup) - 1;
+ } else {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+
+ tags[len++] = QUP_TAG_V2_DATARD;
+ /* Read 1 byte indicating the length of the SMBus message */
+ tags[len++] = 1;
+ }
+ return len;
+}
+
static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
struct i2c_msg *msg, int is_dma)
{
@@ -534,6 +566,10 @@ static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);
+ /* Handle tags for SMBus block read */
+ if (qup_i2c_check_msg_len(msg))
+ return qup_i2c_set_tags_smb(addr, tags, qup, msg);
+
if (qup->blk.pos == 0) {
tags[len++] = QUP_TAG_V2_START;
tags[len++] = addr & 0xff;
@@ -1056,9 +1092,17 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
struct i2c_msg *msg)
{
u32 val;
- int idx, pos = 0, ret = 0, total;
+ int idx, pos = 0, ret = 0, total, msg_offset = 0;
+ /*
+	 * If the message length has already been read
+	 * into the first byte of the buffer, account
+	 * for that by setting the offset.
+ */
+ if (qup_i2c_check_msg_len(msg) && (msg->len > 1))
+ msg_offset = 1;
total = qup_i2c_get_data_len(qup);
+ total -= msg_offset;
/* 2 extra bytes for read tags */
while (pos < (total + 2)) {
@@ -1078,8 +1122,8 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
if (pos >= (total + 2))
goto out;
-
- msg->buf[qup->pos++] = val & 0xff;
+ msg->buf[qup->pos + msg_offset] = val & 0xff;
+ qup->pos++;
}
}
@@ -1119,6 +1163,20 @@ static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
goto err;
qup->blk.pos++;
+
+ /* Handle SMBus block read length */
+ if (qup_i2c_check_msg_len(msg) && (msg->len == 1)) {
+ if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EPROTO;
+ goto err;
+ }
+ msg->len += msg->buf[0];
+ qup->pos = 0;
+ qup_i2c_set_blk_data(qup, msg);
+ /* set tag length for block read */
+ qup->blk.tx_tag_len = 2;
+ qup_i2c_set_read_mode_v2(qup, msg->buf[0]);
+ }
} while (qup->blk.pos < qup->blk.count);
err:
@@ -1204,6 +1262,11 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
goto out;
}
+ if (qup_i2c_check_msg_len(&msgs[idx])) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (msgs[idx].flags & I2C_M_RD)
ret = qup_i2c_read_one(qup, &msgs[idx]);
else
@@ -1358,14 +1421,13 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
static int qup_i2c_probe(struct platform_device *pdev)
{
static const int blk_sizes[] = {4, 16, 32};
- struct device_node *node = pdev->dev.of_node;
struct qup_i2c_dev *qup;
unsigned long one_bit_t;
struct resource *res;
u32 io_mode, hw_ver, size;
int ret, fs_div, hs_div;
- int src_clk_freq;
- u32 clk_freq = 100000;
+ u32 src_clk_freq = DEFAULT_SRC_CLK;
+ u32 clk_freq = DEFAULT_CLK_FREQ;
int blocks;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
@@ -1376,7 +1438,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
init_completion(&qup->xfer);
platform_set_drvdata(pdev, qup);
- of_property_read_u32(node, "clock-frequency", &clk_freq);
+ ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq);
+ if (ret) {
+ dev_notice(qup->dev, "using default clock-frequency %d",
+ DEFAULT_CLK_FREQ);
+ }
if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
qup->adap.algo = &qup_i2c_algo;
@@ -1452,20 +1518,30 @@ nodma:
return qup->irq;
}
- qup->clk = devm_clk_get(qup->dev, "core");
- if (IS_ERR(qup->clk)) {
- dev_err(qup->dev, "Could not get core clock\n");
- return PTR_ERR(qup->clk);
- }
+ if (has_acpi_companion(qup->dev)) {
+ ret = device_property_read_u32(qup->dev,
+ "src-clock-hz", &src_clk_freq);
+ if (ret) {
+ dev_notice(qup->dev, "using default src-clock-hz %d",
+ DEFAULT_SRC_CLK);
+ }
+ ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev));
+ } else {
+ qup->clk = devm_clk_get(qup->dev, "core");
+ if (IS_ERR(qup->clk)) {
+ dev_err(qup->dev, "Could not get core clock\n");
+ return PTR_ERR(qup->clk);
+ }
- qup->pclk = devm_clk_get(qup->dev, "iface");
- if (IS_ERR(qup->pclk)) {
- dev_err(qup->dev, "Could not get iface clock\n");
- return PTR_ERR(qup->pclk);
+ qup->pclk = devm_clk_get(qup->dev, "iface");
+ if (IS_ERR(qup->pclk)) {
+ dev_err(qup->dev, "Could not get iface clock\n");
+ return PTR_ERR(qup->pclk);
+ }
+ qup_i2c_enable_clocks(qup);
+ src_clk_freq = clk_get_rate(qup->clk);
}
- qup_i2c_enable_clocks(qup);
-
/*
* Bootloaders might leave a pending interrupt on certain QUP's,
* so we reset the core before registering for interrupts.
@@ -1512,7 +1588,6 @@ nodma:
size = QUP_INPUT_FIFO_SIZE(io_mode);
qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
- src_clk_freq = clk_get_rate(qup->clk);
fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
hs_div = 3;
qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
@@ -1631,6 +1706,14 @@ static const struct of_device_id qup_i2c_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qup_i2c_acpi_match[] = {
+ { "QCOM8010"},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
+#endif
+
static struct platform_driver qup_i2c_driver = {
.probe = qup_i2c_probe,
.remove = qup_i2c_remove,
@@ -1638,6 +1721,7 @@ static struct platform_driver qup_i2c_driver = {
.name = "i2c_qup",
.pm = &qup_i2c_qup_pm_ops,
.of_match_table = qup_i2c_dt_match,
+ .acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
},
};
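For I2C_M_RECV_LEN transfers the QUP v2 path above first reads a single length byte, checks it against I2C_SMBUS_BLOCK_MAX, extends msg->len by that count and then re-programs the block for the remaining bytes. The sketch below captures only the length-handling rule; smbus_block_extend() is a hypothetical helper, not a driver function.

#include <stdio.h>

#define SMBUS_BLOCK_MAX 32	/* same bound the driver checks against */

/* Given the first received byte of an SMBus block read, return the new
 * total message length, or -1 if the slave reported an invalid count. */
static int smbus_block_extend(unsigned char first_byte, int cur_len)
{
	if (first_byte > SMBUS_BLOCK_MAX)
		return -1;			/* -EPROTO in the driver */
	return cur_len + first_byte;		/* count byte + payload */
}

int main(void)
{
	printf("%d\n", smbus_block_extend(4, 1));	/* 5 */
	printf("%d\n", smbus_block_extend(40, 1));	/* -1 */
	return 0;
}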
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 726615e54f2a..26f2ff22e97e 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -793,7 +793,6 @@ static const struct i2c_algorithm rcar_i2c_algo = {
};
static const struct of_device_id rcar_i2c_dt_ids[] = {
- { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
@@ -803,6 +802,10 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
{ .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
+ { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 }, /* Deprecated */
+ { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
+ { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 192f36f00e4d..3d9ebe6e5716 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -827,7 +827,6 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
};
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
- { .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config },
@@ -835,8 +834,11 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e52c79..beee31892295 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -528,7 +528,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
ret = -EINVAL;
- goto err;
+ goto disable_clk;
}
init_completion(&priv->comp);
@@ -547,11 +547,11 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
pdev->name, priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto err;
+ goto disable_clk;
}
ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af46ddc..777c0fe93653 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -373,7 +373,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (!clk_rate) {
dev_err(dev, "input clock rate should not be zero\n");
ret = -EINVAL;
- goto err;
+ goto disable_clk;
}
init_completion(&priv->comp);
@@ -392,11 +392,11 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", irq);
- goto err;
+ goto disable_clk;
}
ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
if (ret)
clk_disable_unprepare(priv->clk);
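
For reference (not from the series): both uniphier hunks rename the catch-all "err" label to "disable_clk" so the label names the unwind step it performs. A hedged skeleton of a probe() written in that style, with placeholder driver names, might look like this:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t bar_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* placeholder interrupt handler */
}

static int bar_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int irq, ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto disable_clk;
	}

	ret = devm_request_irq(&pdev->dev, irq, bar_isr, 0,
			       dev_name(&pdev->dev), NULL);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	/* The label says exactly what is undone on the error path. */
	clk_disable_unprepare(clk);
	return ret;
}
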
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 543456a0a338..e4be86b3de9a 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -354,7 +354,7 @@ static const struct i2c_algorithm vprbrd_algorithm = {
.functionality = vprbrd_i2c_func,
};
-static struct i2c_adapter_quirks vprbrd_quirks = {
+static const struct i2c_adapter_quirks vprbrd_quirks = {
.max_read_len = 2048,
.max_write_len = 2048,
};
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 05cf192ef1ac..0ab1e55558bc 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -415,6 +415,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
adapter->algo = &xgene_slimpro_i2c_algorithm;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index e29ff37a43bd..84a8b2eccffb 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -393,6 +393,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
init_completion(&priv->msg_complete);
priv->adapter.dev.parent = &pdev->dev;
priv->adapter.algo = &xlp9xx_i2c_algo;
+ ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev));
priv->adapter.dev.of_node = pdev->dev.of_node;
priv->dev = &pdev->dev;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b432b64e307a..583e95042a21 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -30,7 +30,7 @@
#define pr_fmt(fmt) "i2c-core: " fmt
#include <dt-bindings/i2c/i2c.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/completion.h>
@@ -65,6 +65,9 @@
#define I2C_ADDR_OFFSET_TEN_BIT 0xa000
#define I2C_ADDR_OFFSET_SLAVE 0x1000
+#define I2C_ADDR_7BITS_MAX 0x77
+#define I2C_ADDR_7BITS_COUNT (I2C_ADDR_7BITS_MAX + 1)
+
/* core_lock protects i2c_adapter_idr, and guarantees
that device detection, deletion of detected devices, and attach_adapter
calls are serialized */
@@ -77,9 +80,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
static bool is_registered;
-void i2c_transfer_trace_reg(void)
+int i2c_transfer_trace_reg(void)
{
static_key_slow_inc(&i2c_trace_msg);
+ return 0;
}
void i2c_transfer_trace_unreg(void)
@@ -676,9 +680,12 @@ static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
/* ------------------------------------------------------------------------- */
-static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client)
{
+ if (!(id && client))
+ return NULL;
+
while (id->name[0]) {
if (strcmp(client->name, id->name) == 0)
return id;
@@ -686,17 +693,16 @@ static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
}
return NULL;
}
+EXPORT_SYMBOL_GPL(i2c_match_id);
static int i2c_device_match(struct device *dev, struct device_driver *drv)
{
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- if (!client)
- return 0;
/* Attempt an OF style match */
- if (of_driver_match_device(dev, drv))
+ if (i2c_of_match_device(drv->of_match_table, client))
return 1;
/* Then ACPI style match */
@@ -704,9 +710,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 1;
driver = to_i2c_driver(drv);
- /* match on an id table if there is one */
- if (driver->id_table)
- return i2c_match_id(driver->id_table, client) != NULL;
+
+ /* Finally an I2C match */
+ if (i2c_match_id(driver->id_table, client))
+ return 1;
return 0;
}
@@ -893,6 +900,25 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
adap->bus_recovery_info = NULL;
}
+static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
+{
+ struct i2c_adapter *adap = client->adapter;
+ unsigned int irq;
+
+ if (!adap->host_notify_domain)
+ return -ENXIO;
+
+ if (client->flags & I2C_CLIENT_TEN)
+ return -EINVAL;
+
+ irq = irq_find_mapping(adap->host_notify_domain, client->addr);
+ if (!irq)
+ irq = irq_create_mapping(adap->host_notify_domain,
+ client->addr);
+
+ return irq > 0 ? irq : -ENXIO;
+}
+
static int i2c_device_probe(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -905,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
if (!client->irq) {
int irq = -ENOENT;
- if (dev->of_node) {
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
+ dev_dbg(dev, "Using Host Notify IRQ\n");
+ irq = i2c_smbus_host_notify_to_irq(client);
+ } else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
@@ -914,6 +943,7 @@ static int i2c_device_probe(struct device *dev)
}
if (irq == -EPROBE_DEFER)
return irq;
+
if (irq < 0)
irq = 0;
@@ -921,7 +951,13 @@ static int i2c_device_probe(struct device *dev)
}
driver = to_i2c_driver(dev->driver);
- if (!driver->probe || !driver->id_table)
+
+ /*
+ * An I2C ID table is not mandatory, if and only if, a suitable Device
+ * Tree match table entry is supplied for the probing device.
+ */
+ if (!driver->id_table &&
+ !i2c_of_match_device(dev->driver->of_match_table, client))
return -ENODEV;
if (client->flags & I2C_CLIENT_WAKE) {
@@ -956,7 +992,18 @@ static int i2c_device_probe(struct device *dev)
if (status == -EPROBE_DEFER)
goto err_clear_wakeup_irq;
- status = driver->probe(client, i2c_match_id(driver->id_table, client));
+ /*
+ * When there are no more users of probe(),
+ * rename probe_new to probe.
+ */
+ if (driver->probe_new)
+ status = driver->probe_new(client);
+ else if (driver->probe)
+ status = driver->probe(client,
+ i2c_match_id(driver->id_table, client));
+ else
+ status = -EINVAL;
+
if (status)
goto err_detach_pm_domain;
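
A hedged sketch of what the hunk above enables on the client side (not taken from the series): a driver may now implement probe_new(), which drops the i2c_device_id argument, and may omit the ID table entirely when it carries an of_match_table. The "vendor,baz" compatible is a placeholder.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static int baz_probe(struct i2c_client *client)
{
	dev_info(&client->dev, "baz bound at 0x%02x\n", client->addr);
	return 0;
}

static const struct of_device_id baz_of_match[] = {
	{ .compatible = "vendor,baz" },		/* placeholder compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, baz_of_match);

static struct i2c_driver baz_driver = {
	.driver = {
		.name = "baz",
		.of_match_table = baz_of_match,
	},
	.probe_new = baz_probe,	/* no i2c_device_id table needed for DT systems */
};
module_i2c_driver(baz_driver);

MODULE_LICENSE("GPL");

With the i2c_of_match_device() fallback added further down in this file, a device instantiated from sysfs under the bare name "baz" should still bind to this driver even though no i2c_device_id table is present.
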
@@ -1657,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
+ addr, node->full_name);
return ERR_PTR(-EINVAL);
}
@@ -1665,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
info.of_node = of_node_get(node);
info.archdata = &dev_ad;
+ if (of_property_read_bool(node, "host-notify"))
+ info.flags |= I2C_CLIENT_HOST_NOTIFY;
+
if (of_get_property(node, "wakeup-source", NULL))
info.flags |= I2C_CLIENT_WAKE;
@@ -1767,6 +1817,52 @@ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
return adapter;
}
EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
+
+static const struct of_device_id*
+i2c_of_match_device_sysfs(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ const char *name;
+
+ for (; matches->compatible[0]; matches++) {
+ /*
+ * Adding devices through the i2c sysfs interface provides us
+ * a string to match which may be compatible with the device
+ * tree compatible strings, however with no actual of_node the
+ * of_match_device() will not match
+ */
+ if (sysfs_streq(client->name, matches->compatible))
+ return matches;
+
+ name = strchr(matches->compatible, ',');
+ if (!name)
+ name = matches->compatible;
+ else
+ name++;
+
+ if (sysfs_streq(client->name, name))
+ return matches;
+ }
+
+ return NULL;
+}
+
+const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ const struct of_device_id *match;
+
+ if (!(client && matches))
+ return NULL;
+
+ match = of_match_device(matches, &client->dev);
+ if (match)
+ return match;
+
+ return i2c_of_match_device_sysfs(matches, client);
+}
+EXPORT_SYMBOL_GPL(i2c_of_match_device);
#else
static void of_i2c_register_devices(struct i2c_adapter *adap) { }
#endif /* CONFIG_OF */
@@ -1800,6 +1896,79 @@ static const struct i2c_lock_operations i2c_adapter_lock_ops = {
.unlock_bus = i2c_adapter_unlock_bus,
};
+static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap)
+{
+ struct irq_domain *domain = adap->host_notify_domain;
+ irq_hw_number_t hwirq;
+
+ if (!domain)
+ return;
+
+ for (hwirq = 0 ; hwirq < I2C_ADDR_7BITS_COUNT ; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(domain, hwirq));
+
+ irq_domain_remove(domain);
+ adap->host_notify_domain = NULL;
+}
+
+static int i2c_host_notify_irq_map(struct irq_domain *h,
+ unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops i2c_host_notify_irq_ops = {
+ .map = i2c_host_notify_irq_map,
+};
+
+static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
+{
+ struct irq_domain *domain;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
+ return 0;
+
+ domain = irq_domain_create_linear(adap->dev.fwnode,
+ I2C_ADDR_7BITS_COUNT,
+ &i2c_host_notify_irq_ops, adap);
+ if (!domain)
+ return -ENOMEM;
+
+ adap->host_notify_domain = domain;
+
+ return 0;
+}
+
+/**
+ * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
+ * I2C client.
+ * @adap: the adapter
+ * @addr: the I2C address of the notifying device
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the Host Notify IRQ.
+ */
+int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
+{
+ int irq;
+
+ if (!adap)
+ return -EINVAL;
+
+ irq = irq_find_mapping(adap->host_notify_domain, addr);
+ if (irq <= 0)
+ return -ENXIO;
+
+ generic_handle_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
+
static int i2c_register_adapter(struct i2c_adapter *adap)
{
int res = -EINVAL;
@@ -1831,6 +2000,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (adap->timeout == 0)
adap->timeout = HZ;
+ /* register soft irqs for Host Notify */
+ res = i2c_setup_host_notify_irq_domain(adap);
+ if (res) {
+ pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n",
+ adap->name, res);
+ goto out_list;
+ }
+
dev_set_name(&adap->dev, "i2c-%d", adap->nr);
adap->dev.bus = &i2c_bus_type;
adap->dev.type = &i2c_adapter_type;
@@ -2068,6 +2245,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
pm_runtime_disable(&adap->dev);
+ i2c_host_notify_irq_teardown(adap);
+
/* wait until all references to the device are gone
*
* FIXME: This is old code and should ideally be replaced by an
@@ -3453,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
int ret;
if (!client || !slave_cb) {
- WARN(1, "insufficent data\n");
+ WARN(1, "insufficient data\n");
return -EINVAL;
}
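
Hedged sketch of the adapter-driver side of the new Host Notify plumbing (not part of the patch): a controller that can receive SMBus Host Notify reports the notifying address to the core, which raises the per-address soft IRQ that i2c_device_probe() handed out as client->irq for clients flagged I2C_CLIENT_HOST_NOTIFY (for example via the new "host-notify" DT property). The struct foo_i2c, the register-read helper and its layout are placeholders.

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_i2c {
	struct i2c_adapter adap;
	void __iomem *regs;
};

/* Placeholder: read the notifying slave address from controller registers. */
static bool foo_i2c_read_host_notify_addr(struct foo_i2c *bus, u8 *addr)
{
	*addr = readb(bus->regs) & 0x7f;	/* hypothetical register layout */
	return true;
}

static irqreturn_t foo_i2c_isr(int irq, void *dev_id)
{
	struct foo_i2c *bus = dev_id;
	u8 addr;

	if (!foo_i2c_read_host_notify_addr(bus, &addr))
		return IRQ_NONE;

	/*
	 * The core looks up the soft IRQ it mapped for this address and
	 * calls generic_handle_irq() on it, firing whatever handler the
	 * client driver requested on client->irq.
	 */
	i2c_handle_smbus_host_notify(&bus->adap, addr);

	return IRQ_HANDLED;
}

The adapter must advertise I2C_FUNC_SMBUS_HOST_NOTIFY; otherwise i2c_setup_host_notify_irq_domain() above skips creating the domain and the lookup fails with -ENXIO.
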
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 66f323fd3982..6f638bbc922d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index b0d2679c60d1..f9271c713d20 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -241,108 +241,6 @@ int i2c_handle_smbus_alert(struct i2c_client *ara)
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
-static void smbus_host_notify_work(struct work_struct *work)
-{
- struct alert_data alert;
- struct i2c_adapter *adapter;
- unsigned long flags;
- u16 payload;
- u8 addr;
- struct smbus_host_notify *data;
-
- data = container_of(work, struct smbus_host_notify, work);
-
- spin_lock_irqsave(&data->lock, flags);
- payload = data->payload;
- addr = data->addr;
- adapter = data->adapter;
-
- /* clear the pending bit and release the spinlock */
- data->pending = false;
- spin_unlock_irqrestore(&data->lock, flags);
-
- if (!adapter || !addr)
- return;
-
- alert.type = I2C_PROTOCOL_SMBUS_HOST_NOTIFY;
- alert.addr = addr;
- alert.data = payload;
-
- device_for_each_child(&adapter->dev, &alert, smbus_do_alert);
-}
-
-/**
- * i2c_setup_smbus_host_notify - Allocate a new smbus_host_notify for the given
- * I2C adapter.
- * @adapter: the adapter we want to associate a Host Notify function
- *
- * Returns a struct smbus_host_notify pointer on success, and NULL on failure.
- * The resulting smbus_host_notify must not be freed afterwards, it is a
- * managed resource already.
- */
-struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap)
-{
- struct smbus_host_notify *host_notify;
-
- host_notify = devm_kzalloc(&adap->dev, sizeof(struct smbus_host_notify),
- GFP_KERNEL);
- if (!host_notify)
- return NULL;
-
- host_notify->adapter = adap;
-
- spin_lock_init(&host_notify->lock);
- INIT_WORK(&host_notify->work, smbus_host_notify_work);
-
- return host_notify;
-}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_host_notify);
-
-/**
- * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
- * I2C client.
- * @host_notify: the struct host_notify attached to the relevant adapter
- * @addr: the I2C address of the notifying device
- * @data: the payload of the notification
- * Context: can't sleep
- *
- * Helper function to be called from an I2C bus driver's interrupt
- * handler. It will schedule the Host Notify work, in turn calling the
- * corresponding I2C device driver's alert function.
- *
- * host_notify should be a valid pointer previously returned by
- * i2c_setup_smbus_host_notify().
- */
-int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
- unsigned short addr, unsigned int data)
-{
- unsigned long flags;
- struct i2c_adapter *adapter;
-
- if (!host_notify || !host_notify->adapter)
- return -EINVAL;
-
- adapter = host_notify->adapter;
-
- spin_lock_irqsave(&host_notify->lock, flags);
-
- if (host_notify->pending) {
- spin_unlock_irqrestore(&host_notify->lock, flags);
- dev_warn(&adapter->dev, "Host Notify already scheduled.\n");
- return -EBUSY;
- }
-
- host_notify->payload = data;
- host_notify->addr = addr;
-
- /* Mark that there is a pending notification and release the lock */
- host_notify->pending = true;
- spin_unlock_irqrestore(&host_notify->lock, flags);
-
- return schedule_work(&host_notify->work);
-}
-EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
-
module_i2c_driver(smbalert_driver);
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 96de9ce5669b..10b3d17ae3ea 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -82,4 +82,15 @@ config I2C_DEMUX_PINCTRL
demultiplexer that uses the pinctrl subsystem. This is useful if you
want to change the I2C master at run-time depending on features.
+config I2C_MUX_MLXCPLD
+ tristate "Mellanox CPLD based I2C multiplexer"
+ help
+ If you say yes to this option, support will be included for a
+ CPLD based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ by a CPLD register.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mlxcpld.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 7c267c29b191..9948fa45037f 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
obj-$(CONFIG_I2C_DEMUX_PINCTRL) += i2c-demux-pinctrl.o
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5cf26eefa97..655684d621a4 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -21,6 +21,8 @@
struct gpiomux {
struct i2c_mux_gpio_platform_data data;
unsigned gpio_base;
+ struct gpio_desc **gpios;
+ int *values;
};
static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
@@ -28,8 +30,10 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
int i;
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
- val & (1 << i));
+ mux->values[i] = (val >> i) & 1;
+
+ gpiod_set_array_value_cansleep(mux->data.n_gpios,
+ mux->gpios, mux->values);
}
static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
@@ -176,12 +180,16 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
if (!parent)
return -EPROBE_DEFER;
- muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+ muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
+ mux->data.n_gpios * sizeof(*mux->gpios) +
+ mux->data.n_gpios * sizeof(*mux->values), 0,
i2c_mux_gpio_select, NULL);
if (!muxc) {
ret = -ENOMEM;
goto alloc_failed;
}
+ mux->gpios = muxc->priv;
+ mux->values = (int *)(mux->gpios + mux->data.n_gpios);
muxc->priv = mux;
platform_set_drvdata(pdev, muxc);
@@ -219,10 +227,12 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
goto err_request_gpio;
}
+ gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
+ mux->gpios[i] = gpio_desc;
+
if (!muxc->mux_locked)
continue;
- gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
gpio_dev = &gpio_desc->gdev->dev;
muxc->mux_locked = i2c_root_adapter(gpio_dev) == root;
}
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
new file mode 100644
index 000000000000..b7ca249ec9c3
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -0,0 +1,222 @@
+/*
+ * drivers/i2c/muxes/i2c-mux-mlxcpld.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/i2c/mlxcpld.h>
+
+#define CPLD_MUX_MAX_NCHANS 8
+
+/* mlxcpld_mux - mux control structure:
+ * @last_chan - last register value
+ * @client - I2C device client
+ */
+struct mlxcpld_mux {
+ u8 last_chan;
+ struct i2c_client *client;
+};
+
+/* MUX logic description.
+ * Driver can support different mux control logic, according to CPLD
+ * implementation.
+ *
+ * Connectivity schema.
+ *
+ * i2c-mlxcpld Digital Analog
+ * driver
+ * *--------* * -> mux1 (virt bus2) -> mux -> |
+ * | I2CLPC | i2c physical * -> mux2 (virt bus3) -> mux -> |
+ * | bridge | bus 1 *---------* |
+ * | logic |---------------------> * mux reg * |
+ * | in CPLD| *---------* |
+ * *--------* i2c-mux-mlxpcld ^ * -> muxn (virt busn) -> mux -> |
+ * | driver | |
+ * | *---------------* | Devices
+ * | * CPLD (i2c bus)* select |
+ * | * registers for *--------*
+ * | * mux selection * deselect
+ * | *---------------*
+ * | |
+ * <--------> <----------->
+ * i2c cntrl Board cntrl reg
+ * reg space space (mux select,
+ * IO, LED, WD, info)
+ *
+ */
+
+static const struct i2c_device_id mlxcpld_mux_id[] = {
+ { "mlxcpld_mux_module", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mlxcpld_mux_id);
+
+/* Write to mux register. Don't use i2c_transfer() and i2c_smbus_xfer()
+ * for this as they will try to lock adapter a second time.
+ */
+static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
+ struct i2c_client *client, u8 val)
+{
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ int ret = -ENODEV;
+
+ if (adap->algo->master_xfer) {
+ struct i2c_msg msg;
+ u8 msgbuf[] = {pdata->sel_reg_addr, val};
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = msgbuf;
+ ret = __i2c_transfer(adap, &msg, 1);
+
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ } else if (adap->algo->smbus_xfer) {
+ union i2c_smbus_data data;
+
+ data.byte = val;
+ ret = adap->algo->smbus_xfer(adap, client->addr,
+ client->flags, I2C_SMBUS_WRITE,
+ pdata->sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
+ }
+
+ return ret;
+}
+
+static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+ u8 regval = chan + 1;
+ int err = 0;
+
+	/* Only select the channel if it's different from the last channel */
+ if (data->last_chan != regval) {
+ err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
+ data->last_chan = err < 0 ? 0 : regval;
+ }
+
+ return err;
+}
+
+static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+
+ /* Deselect active channel */
+ data->last_chan = 0;
+
+ return mlxcpld_mux_reg_write(muxc->parent, client, data->last_chan);
+}
+
+/* Probe/remove functions */
+static int mlxcpld_mux_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ struct i2c_mux_core *muxc;
+ int num, force;
+ struct mlxcpld_mux *data;
+ int err;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -ENODEV;
+
+ muxc = i2c_mux_alloc(adap, &client->dev, CPLD_MUX_MAX_NCHANS,
+ sizeof(*data), 0, mlxcpld_mux_select_chan,
+ mlxcpld_mux_deselect);
+ if (!muxc)
+ return -ENOMEM;
+
+ data = i2c_mux_priv(muxc);
+ i2c_set_clientdata(client, muxc);
+ data->client = client;
+ data->last_chan = 0; /* force the first selection */
+
+ /* Create an adapter for each channel. */
+ for (num = 0; num < CPLD_MUX_MAX_NCHANS; num++) {
+ if (num >= pdata->num_adaps)
+ /* discard unconfigured channels */
+ break;
+
+ force = pdata->adap_ids[num];
+
+ err = i2c_mux_add_adapter(muxc, force, num, 0);
+ if (err)
+ goto virt_reg_failed;
+ }
+
+ return 0;
+
+virt_reg_failed:
+ i2c_mux_del_adapters(muxc);
+ return err;
+}
+
+static int mlxcpld_mux_remove(struct i2c_client *client)
+{
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+
+ i2c_mux_del_adapters(muxc);
+ return 0;
+}
+
+static struct i2c_driver mlxcpld_mux_driver = {
+ .driver = {
+ .name = "mlxcpld-mux",
+ },
+ .probe = mlxcpld_mux_probe,
+ .remove = mlxcpld_mux_remove,
+ .id_table = mlxcpld_mux_id,
+};
+
+module_i2c_driver(mlxcpld_mux_driver);
+
+MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mux-mlxcpld");
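
Hedged example of how a board or platform file might instantiate this mux (not from the patch). It assumes struct mlxcpld_mux_plat_data exposes the sel_reg_addr/adap_ids/num_adaps fields the driver above dereferences; consult <linux/i2c/mlxcpld.h> for the real layout. The bus number, slave address and register value are placeholders.

#include <linux/i2c.h>
#include <linux/i2c/mlxcpld.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int foo_adap_ids[] = { 2, 3, 4, 5, 6, 7, 8, 9 };

static struct mlxcpld_mux_plat_data foo_mux_pdata = {
	.sel_reg_addr	= 0xdb,			/* hypothetical CPLD mux register */
	.adap_ids	= foo_adap_ids,		/* requested child bus numbers */
	.num_adaps	= ARRAY_SIZE(foo_adap_ids),
};

static const struct i2c_board_info foo_mux_info = {
	I2C_BOARD_INFO("mlxcpld_mux_module", 0x70),	/* placeholder address */
	.platform_data = &foo_mux_pdata,
};

static int __init foo_register_mux(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(1);	/* physical LPC/I2C bus */
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;

	client = i2c_new_device(adap, &foo_mux_info);
	i2c_put_adapter(adap);

	return client ? 0 : -ENODEV;
}
device_initcall(foo_register_mux);
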
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 8bc3d36d2837..dd18b9ccb1f4 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,6 +35,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -120,6 +121,21 @@ static const struct i2c_device_id pca954x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca954x_id);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pca954x_acpi_ids[] = {
+ { .id = "PCA9540", .driver_data = pca_9540 },
+ { .id = "PCA9542", .driver_data = pca_9540 },
+ { .id = "PCA9543", .driver_data = pca_9543 },
+ { .id = "PCA9544", .driver_data = pca_9544 },
+ { .id = "PCA9545", .driver_data = pca_9545 },
+ { .id = "PCA9546", .driver_data = pca_9545 },
+ { .id = "PCA9547", .driver_data = pca_9547 },
+ { .id = "PCA9548", .driver_data = pca_9548 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id pca954x_of_match[] = {
{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -151,6 +167,9 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
buf[0] = val;
msg.buf = buf;
ret = __i2c_transfer(adap, &msg, 1);
+
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
} else {
union i2c_smbus_data data;
ret = adap->algo->smbus_xfer(adap, client->addr,
@@ -179,7 +198,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
- data->last_chan = ret ? 0 : regval;
+ data->last_chan = ret < 0 ? 0 : regval;
}
return ret;
@@ -245,8 +264,17 @@ static int pca954x_probe(struct i2c_client *client,
match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
if (match)
data->chip = of_device_get_match_data(&client->dev);
- else
+ else if (id)
data->chip = &chips[id->driver_data];
+ else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
+ &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ data->chip = &chips[acpi_id->driver_data];
+ }
data->last_chan = 0; /* force the first selection */
@@ -321,6 +349,7 @@ static struct i2c_driver pca954x_driver = {
.name = "pca954x",
.pm = &pca954x_pm,
.of_match_table = of_match_ptr(pca954x_of_match),
+ .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
},
.probe = pca954x_probe,
.remove = pca954x_remove,
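
Not from the patch: the probe() ordering above (OF match data first, then the legacy i2c_device_id, then the new ACPI table) is a general pattern for resolving per-variant data. A condensed, hedged sketch for a hypothetical "qux" device, with made-up IDs:

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/of_device.h>

struct qux_chip {
	u8 nchans;
};

enum qux_type { qux_4ch, qux_8ch };

static const struct qux_chip qux_chips[] = {
	[qux_4ch] = { .nchans = 4 },
	[qux_8ch] = { .nchans = 8 },
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id qux_acpi_ids[] = {
	{ .id = "QUX0004", .driver_data = qux_4ch },	/* placeholder HIDs */
	{ .id = "QUX0008", .driver_data = qux_8ch },
	{ }
};
#endif

static const struct qux_chip *qux_get_chip(struct i2c_client *client,
					   const struct i2c_device_id *id)
{
	const struct acpi_device_id *acpi_id;

	/* 1. Device Tree: match data attached to the compatible entry */
	if (client->dev.of_node)
		return of_device_get_match_data(&client->dev);

	/* 2. Legacy I2C ID table (board files, sysfs new_device) */
	if (id)
		return &qux_chips[id->driver_data];

	/* 3. ACPI enumeration */
	acpi_id = acpi_match_device(ACPI_PTR(qux_acpi_ids), &client->dev);
	if (acpi_id)
		return &qux_chips[acpi_id->driver_data];

	return NULL;
}
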
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 0ceae5cbd89a..4b5dc0162e67 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -130,7 +130,7 @@
#include <linux/ide.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#define DRV_NAME "hpt366"
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 83679da0c3f0..5ceace542b77 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -31,7 +31,7 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6360bbd37efe..201e43fcbc94 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -51,7 +51,7 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 376f2dc410c5..210a0887dd29 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -24,7 +24,7 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
void SELECT_MASK(ide_drive_t *drive, int mask)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 0b63facd1d87..330e319419e6 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -36,7 +36,7 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/**
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 97c070077774..863db44c7916 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index f6b6d42385e1..784670e2736b 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -353,12 +353,12 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
.value = 0x00,
- .gain = IIO_G_TO_M_S_2(1024),
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
.value = 0x01,
- .gain = IIO_G_TO_M_S_2(340),
+ .gain = IIO_G_TO_M_S_2(3000),
},
},
},
@@ -366,6 +366,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = 0x21,
.mask = 0x40,
},
+ /*
+ * Data Alignment Setting - needs to be set to get
+ * left-justified data like all other sensors.
+ */
+ .das = {
+ .addr = 0x21,
+ .mask = 0x01,
+ },
.drdy_irq = {
.addr = 0x21,
.mask_int1 = 0x04,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 38bc319904c4..9c8b558ba19e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -561,7 +561,7 @@ config TI_ADS8688
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
- depends on MFD_TI_AM335X_TSCADC
+ depends on MFD_TI_AM335X_TSCADC && HAS_DMA
select IIO_BUFFER
select IIO_KFIFO_BUF
help
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index fe7775bb3740..df4045203a07 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
const struct iio_chan_spec *channel = &indio_dev->channels[i];
- unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+ unsigned int bytes_to_read =
+ DIV_ROUND_UP(channel->scan_type.realbits +
+ channel->scan_type.shift, 8);
unsigned int storage_bytes =
channel->scan_type.storagebits >> 3;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 975a1f19f747..79c8c7cd70d5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -401,6 +401,15 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
return err;
}
+ /* set DAS */
+ if (sdata->sensor_settings->das.addr) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->das.addr,
+ sdata->sensor_settings->das.mask, 1);
+ if (err < 0)
+ return err;
+ }
+
if (sdata->int_pin_open_drain) {
dev_info(&indio_dev->dev,
"set interrupt line to open drain mode\n");
@@ -483,8 +492,10 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
int err;
u8 *outdata;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
+ unsigned int byte_for_channel;
+ byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits +
+ ch->scan_type.shift, 8);
outdata = kmalloc(byte_for_channel, GFP_KERNEL);
if (!outdata)
return -ENOMEM;
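
Worked example (not from the patch) of why the transfer size now folds in scan_type.shift: a left-aligned sample occupies realbits + shift bits on the wire, so the byte count must round that up rather than truncate realbits.

#include <linux/kernel.h>

static unsigned int st_bytes_for_channel(unsigned int realbits,
					 unsigned int shift)
{
	return DIV_ROUND_UP(realbits + shift, 8);
}

/*
 * realbits = 12, shift = 4:  old math: 12 >> 3 = 1 byte (truncated sample)
 *                            new math: DIV_ROUND_UP(16, 8) = 2 bytes
 */
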
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index 2d2ee353dde7..a5913e97945e 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -153,7 +153,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
/* Load I/O control configuration */
- outb(0x40 | ior_cfg, base_offset);
+ outb(0x40 | ior_cfg, base_offset + 1);
return 0;
case IIO_CHAN_INFO_SCALE:
@@ -233,7 +233,7 @@ static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
const struct quad8_iio *const priv = iio_priv(indio_dev);
return snprintf(buf, PAGE_SIZE, "%u\n",
- priv->preset_enable[chan->channel]);
+ !priv->preset_enable[chan->channel]);
}
static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
@@ -241,7 +241,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
size_t len)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
bool preset_enable;
int ret;
unsigned int ior_cfg;
@@ -250,6 +250,9 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
if (ret)
return ret;
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
priv->preset_enable[chan->channel] = preset_enable;
ior_cfg = priv->ab_enable[chan->channel] |
@@ -362,7 +365,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(0x40 | idr_cfg, base_offset);
+ outb(0x60 | idr_cfg, base_offset);
return 0;
}
@@ -444,7 +447,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(0x40 | idr_cfg, base_offset);
+ outb(0x60 | idr_cfg, base_offset);
return 0;
}
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 5355507f8fa1..c9e319bff58b 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -66,10 +66,8 @@
#define BMI160_REG_DUMMY 0x7F
-#define BMI160_ACCEL_PMU_MIN_USLEEP 3200
-#define BMI160_ACCEL_PMU_MAX_USLEEP 3800
-#define BMI160_GYRO_PMU_MIN_USLEEP 55000
-#define BMI160_GYRO_PMU_MAX_USLEEP 80000
+#define BMI160_ACCEL_PMU_MIN_USLEEP 3800
+#define BMI160_GYRO_PMU_MIN_USLEEP 80000
#define BMI160_SOFTRESET_USLEEP 1000
#define BMI160_CHANNEL(_type, _axis, _index) { \
@@ -151,20 +149,9 @@ static struct bmi160_regs bmi160_regs[] = {
},
};
-struct bmi160_pmu_time {
- unsigned long min;
- unsigned long max;
-};
-
-static struct bmi160_pmu_time bmi160_pmu_time[] = {
- [BMI160_ACCEL] = {
- .min = BMI160_ACCEL_PMU_MIN_USLEEP,
- .max = BMI160_ACCEL_PMU_MAX_USLEEP
- },
- [BMI160_GYRO] = {
- .min = BMI160_GYRO_PMU_MIN_USLEEP,
- .max = BMI160_GYRO_PMU_MIN_USLEEP,
- },
+static unsigned long bmi160_pmu_time[] = {
+ [BMI160_ACCEL] = BMI160_ACCEL_PMU_MIN_USLEEP,
+ [BMI160_GYRO] = BMI160_GYRO_PMU_MIN_USLEEP,
};
struct bmi160_scale {
@@ -289,7 +276,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
if (ret < 0)
return ret;
- usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
+ usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
return 0;
}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index a144ca3461fc..81bd8e8da4a6 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -113,7 +113,7 @@ static const char max44000_int_time_avail_str[] =
"0.100 "
"0.025 "
"0.00625 "
- "0.001625";
+ "0.0015625";
/* Available scales (internal to ulux) with pretty manual alignment: */
static const int max44000_scale_avail_ulux_array[] = {
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 5e6d451febeb..a1cad6cc2e0f 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -63,7 +63,7 @@ ssize_t iio_hrtimer_store_sampling_frequency(struct device *dev,
return -EINVAL;
info->sampling_frequency = val;
- info->period = ktime_set(0, NSEC_PER_SEC / val);
+ info->period = NSEC_PER_SEC / val;
return len;
}
@@ -141,8 +141,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
trig_info->timer.function = iio_hrtimer_trig_handler;
trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
- trig_info->period = ktime_set(0, NSEC_PER_SEC /
- trig_info->sampling_frequency);
+ trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency;
ret = iio_trigger_register(trig_info->swt.trigger);
if (ret)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index fb3fb89640e5..670917387eda 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -73,6 +73,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 4fa524dfb6cf..11dacd97a667 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 1a2984c28b95..ae04826e82fc 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
int err = 0;
table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-
- if (!table) {
- pr_warn("failed to allocate ib gid cache for %s\n",
- ib_dev->name);
+ if (!table)
return -ENOMEM;
- }
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
u8 rdma_port = port + rdma_start_port(ib_dev);
@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
- pr_warn("Couldn't allocate cache for %s\n", device->name);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free;
}
err = gid_table_setup_one(device);
if (err)
- /* Allocated memory will be cleaned in the release function */
- return err;
+ goto free;
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
ib_cache_update(device, p + rdma_start_port(device));
@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
err:
gid_table_cleanup_one(device);
+free:
+ kfree(device->cache.pkey_cache);
+ kfree(device->cache.lmc_cache);
return err;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 71c7c4c328ef..cf1edfa1cbac 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -57,6 +57,54 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+static const char * const ibcm_rej_reason_strs[] = {
+ [IB_CM_REJ_NO_QP] = "no QP",
+ [IB_CM_REJ_NO_EEC] = "no EEC",
+ [IB_CM_REJ_NO_RESOURCES] = "no resources",
+ [IB_CM_REJ_TIMEOUT] = "timeout",
+ [IB_CM_REJ_UNSUPPORTED] = "unsupported",
+ [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
+ [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
+ [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
+ [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
+ [IB_CM_REJ_STALE_CONN] = "stale conn",
+ [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
+ [IB_CM_REJ_INVALID_GID] = "invalid GID",
+ [IB_CM_REJ_INVALID_LID] = "invalid LID",
+ [IB_CM_REJ_INVALID_SL] = "invalid SL",
+ [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
+ [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
+ [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
+ [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
+ [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
+ [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
+ [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
+ [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
+ [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
+ [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
+ [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
+ [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
+ [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
+ [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
+ [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
+ [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
+ [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
+ [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
+ [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+};
+
+const char *__attribute_const__ ibcm_reject_msg(int reason)
+{
+ size_t index = reason;
+
+ if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
+ ibcm_rej_reason_strs[index])
+ return ibcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(ibcm_reject_msg);
+
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
@@ -1582,6 +1630,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
+ struct ib_cm_id *cm_id;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1603,10 +1652,18 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
+ if (cur_cm_id_priv) {
+ cm_id = &cur_cm_id_priv->id;
+ ib_send_cm_dreq(cm_id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
return NULL;
}
@@ -1984,6 +2041,9 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
+ struct cm_id_private *cur_cm_id_priv;
+ struct ib_cm_id *cm_id;
+ struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
@@ -2018,16 +2078,26 @@ static int cm_rep_handler(struct cm_work *work)
goto error;
}
/* Check for a stale connection. */
- if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
+ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
+ if (timewait_info) {
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
+ cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ if (cur_cm_id_priv) {
+ cm_id = &cur_cm_id_priv->id;
+ ib_send_cm_dreq(cm_id, NULL, 0);
+ cm_deref_id(cur_cm_id_priv);
+ }
+
goto error;
}
spin_unlock(&cm.lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 22fcf284dd8b..e7dcfac877ca 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -101,6 +101,49 @@ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
}
EXPORT_SYMBOL(rdma_event_msg);
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+ int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return ibcm_reject_msg(reason);
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return iwcm_reject_msg(reason);
+
+ WARN_ON_ONCE(1);
+ return "unrecognized transport";
+}
+EXPORT_SYMBOL(rdma_reject_msg);
+
+bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+{
+ if (rdma_ib_or_roce(id->device, id->port_num))
+ return reason == IB_CM_REJ_CONSUMER_DEFINED;
+
+ if (rdma_protocol_iwarp(id->device, id->port_num))
+ return reason == -ECONNREFUSED;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+EXPORT_SYMBOL(rdma_is_consumer_reject);
+
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+ struct rdma_cm_event *ev, u8 *data_len)
+{
+ const void *p;
+
+ if (rdma_is_consumer_reject(id, ev->status)) {
+ *data_len = ev->param.conn.private_data_len;
+ p = ev->param.conn.private_data;
+ } else {
+ *data_len = 0;
+ p = NULL;
+ }
+ return p;
+}
+EXPORT_SYMBOL(rdma_consumer_reject_data);
+
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
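
Hedged usage sketch (not part of the series) of the helpers exported above, as seen from an RDMA CM consumer's event handler; foo_cma_handler is a placeholder callback.

#include <linux/kernel.h>
#include <rdma/rdma_cm.h>

static int foo_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	if (event->event == RDMA_CM_EVENT_REJECTED) {
		u8 data_len;
		const void *data;

		/* Private data is only returned for consumer-defined rejects. */
		data = rdma_consumer_reject_data(id, event, &data_len);
		pr_info("connection rejected: %s%s\n",
			rdma_reject_msg(id, event->status),
			data ? " (consumer-defined, with private data)" : "");
	}

	return 0;
}
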
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 0c0bea091de8..d29372624f3a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
void ib_cache_setup(void);
void ib_cache_cleanup(void);
-int ib_resolve_eth_dmac(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef603a468..571974cd3919 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
unsigned long flags;
context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context) {
- pr_warn("Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
+ if (!context)
return -ENOMEM;
- }
context->client = client;
context->data = NULL;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index cdbb1f1a6d97..cdfad5f26212 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
- pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5495e22839a7..31661b5c1743 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -59,6 +59,27 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
+static const char * const iwcm_rej_reason_strs[] = {
+ [ECONNRESET] = "reset by remote host",
+ [ECONNREFUSED] = "refused by remote application",
+ [ETIMEDOUT] = "setup timeout",
+};
+
+const char *__attribute_const__ iwcm_reject_msg(int reason)
+{
+ size_t index;
+
+ /* iWARP uses negative errnos */
+ index = -reason;
+
+ if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
+ iwcm_rej_reason_strs[index])
+ return iwcm_rej_reason_strs[index];
+ else
+ return "unrecognized reason";
+}
+EXPORT_SYMBOL(iwcm_reject_msg);
+
static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 1c41b95cefec..a0e7c16d8bd8 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
if (!rem_info) {
- pr_err("%s: Unable to allocate a remote info\n", __func__);
ret = -ENOMEM;
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index ade71e7f0131..3ef51a96bbf1 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
sizeof(struct hlist_head), GFP_KERNEL);
if (!iwpm_hash_bucket) {
ret = -ENOMEM;
- pr_err("%s Unable to create mapinfo hash table\n", __func__);
goto init_exit;
}
iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
if (!iwpm_reminfo_bucket) {
kfree(iwpm_hash_bucket);
ret = -ENOMEM;
- pr_err("%s Unable to create reminfo hash table\n", __func__);
goto init_exit;
}
}
@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
if (!iwpm_valid_client(nl_client))
return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
- if (!map_info) {
- pr_err("%s: Unable to allocate a mapping info\n", __func__);
+ if (!map_info)
return -ENOMEM;
- }
+
memcpy(&map_info->local_sockaddr, local_sockaddr,
sizeof(struct sockaddr_storage));
memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
- if (!nlmsg_request) {
- pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+ if (!nlmsg_request)
return NULL;
- }
+
spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 40cbd6bdb73b..a009f7132c73 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
* If we are at the start of the LID routed part, don't update the
* hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/
- if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;
if ((opa_get_smp_direction(opa_smp)
@@ -816,7 +816,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for ib_mad_local_private\n");
goto out;
}
local->mad_priv = NULL;
@@ -824,7 +823,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
- dev_err(&device->dev, "No memory for local response MAD\n");
kfree(local);
goto out;
}
@@ -947,9 +945,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
if (!seg) {
- dev_err(&send_buf->mad_agent->device->dev,
- "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
- sizeof (*seg) + seg_size, gfp_mask);
free_send_rmpp_list(send_wr);
return -ENOMEM;
}
@@ -1362,12 +1357,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
/* Allocate management method table */
*method = kzalloc(sizeof **method, GFP_ATOMIC);
- if (!*method) {
- pr_err("No memory for ib_mad_mgmt_method_table\n");
- return -ENOMEM;
- }
-
- return 0;
+ return (*method) ? 0 : (-ENOMEM);
}
/*
@@ -1458,8 +1448,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_class_table\n");
ret = -ENOMEM;
goto error1;
}
@@ -1524,22 +1512,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
if (!*vendor_table) {
/* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
- if (!vendor) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class_table\n");
+ if (!vendor)
goto error1;
- }
*vendor_table = vendor;
}
if (!(*vendor_table)->vendor_class[vclass]) {
/* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
- if (!vendor_class) {
- dev_err(&agent_priv->agent.device->dev,
- "No memory for ib_mad_mgmt_vendor_class\n");
+ if (!vendor_class)
goto error2;
- }
(*vendor_table)->vendor_class[vclass] = vendor_class;
}
@@ -1746,7 +1728,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
if (!class)
goto out;
if (convert_mgmt_class(mad_hdr->mgmt_class) >=
- IB_MGMT_MAX_METHODS)
+ ARRAY_SIZE(class->method_table))
goto out;
method = class->method_table[convert_mgmt_class(
mad_hdr->mgmt_class)];
@@ -2167,7 +2149,7 @@ handle_smi(struct ib_mad_port_private *port_priv,
struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
- mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+ mad_hdr->class_version == OPA_SM_CLASS_VERSION)
return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
response);
@@ -2238,11 +2220,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
mad_size = recv->mad_size;
response = alloc_mad_private(mad_size, GFP_KERNEL);
- if (!response) {
- dev_err(&port_priv->device->dev,
- "%s: no memory for response buffer\n", __func__);
+ if (!response)
goto out;
- }
if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
@@ -2869,8 +2848,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
if (!mad_priv) {
- dev_err(&qp_info->port_priv->device->dev,
- "No memory for receive buffer\n");
ret = -ENOMEM;
break;
}
@@ -2961,11 +2938,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
- dev_err(&port_priv->device->dev,
- "Couldn't kmalloc ib_qp_attr\n");
+ if (!attr)
return -ENOMEM;
- }
ret = ib_find_pkey(port_priv->device, port_priv->port_num,
IB_DEFAULT_PKEY_FULL, &pkey_index);
@@ -3135,10 +3109,8 @@ static int ib_mad_port_open(struct ib_device *device,
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
- if (!port_priv) {
- dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+ if (!port_priv)
return -ENOMEM;
- }
port_priv->device = device;
port_priv->port_num = port_num;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index e51b739f6ea3..322cb67b07a9 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -518,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
process_join_error(group, status);
else {
int mgids_changed, is_mgid0;
- ib_find_pkey(group->port->dev->device, group->port->port_num,
- be16_to_cpu(rec->pkey), &pkey_index);
+
+ if (ib_find_pkey(group->port->dev->device,
+ group->port->port_num, be16_to_cpu(rec->pkey),
+ &pkey_index))
+ pkey_index = MCAST_INVALID_PKEY_INDEX;
spin_lock_irq(&group->port->lock);
if (group->state == MCAST_BUSY &&
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 3a64a0881882..0621f4455732 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
for_ifa(in_dev) {
struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+ if (!entry)
continue;
- }
+
entry->ip.sin_family = AF_INET;
entry->ip.sin_addr.s_addr = ifa->ifa_address;
list_add_tail(&entry->list, &sin_list);
@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+ if (!entry)
continue;
- }
entry->sin6.sin6_family = AF_INET6;
entry->sin6.sin6_addr = ifp->addr;
@@ -447,10 +444,8 @@ static int netdev_upper_walk(struct net_device *upper, void *data)
struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
struct list_head *upper_list = data;
- if (!entry) {
- pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+ if (!entry)
return 0;
- }
list_add_tail(&entry->list, upper_list);
dev_hold(upper);
@@ -559,10 +554,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
struct netdev_event_work *ndev_work =
kmalloc(sizeof(*ndev_work), GFP_KERNEL);
- if (!ndev_work) {
- pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+ if (!ndev_work)
return NOTIFY_DONE;
- }
memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
@@ -696,10 +689,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
}
work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+ if (!work)
return NOTIFY_DONE;
- }
INIT_WORK(&work->work, update_gid_event_work_handler);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 7713ef089c3c..e0a995b85a2d 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -46,7 +46,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <rdma/ib.h>
#include <rdma/ib_cm.h>
@@ -1104,8 +1104,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (len < sizeof(hdr))
return -EINVAL;
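
The ucm/ucma/uverbs write handlers now reject the request with -EACCES and report it through pr_err_once() instead of tripping WARN_ON_ONCE(). A rough user-space analogue of that log-once-and-reject shape, with the actual credential check stubbed out for the demo:

#include <stdbool.h>
#include <stdio.h>

static bool safe_file_access(void)
{
	return false;	/* pretend the caller changed credentials after open */
}

static int do_write(void)
{
	static bool warned;

	if (!safe_file_access()) {
		if (!warned) {
			warned = true;
			fprintf(stderr, "write: security context changed after open, rejecting\n");
		}
		return -1;	/* the kernel path returns -EACCES */
	}
	return 0;
}

int main(void)
{
	do_write();
	do_write();	/* second failure stays silent */
	return 0;
}
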
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9520154f1d7c..e12f8faf8c23 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1584,8 +1584,11 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 84b4eff90395..1e62a5f0cb28 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -51,7 +51,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
if (umem->nmap > 0)
ib_dma_unmap_sg(dev, umem->sg_head.sgl,
- umem->nmap,
+ umem->npages,
DMA_BIDIRECTIONAL);
for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 1f0fe3217f23..6b079a31dced 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- flags, local_page_list, NULL);
+ flags, local_page_list, NULL, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 415a3185cde7..249b403b43a4 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -50,7 +50,7 @@
#include <linux/semaphore.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index df26a741cda6..455034ac994e 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a2285..700782203483 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "uverbs.h"
#include "core_priv.h"
@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
}
}
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
- struct ib_device *ib_dev,
- const char __user *buf, int in_len,
- int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+ struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
- struct ib_uverbs_modify_qp cmd;
- struct ib_udata udata;
- struct ib_qp *qp;
- struct ib_qp_attr *attr;
- int ret;
-
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
- out_len);
+ struct ib_qp_attr *attr;
+ struct ib_qp *qp;
+ int ret;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
if (!qp) {
ret = -EINVAL;
goto out;
}
- attr->qp_state = cmd.qp_state;
- attr->cur_qp_state = cmd.cur_qp_state;
- attr->path_mtu = cmd.path_mtu;
- attr->path_mig_state = cmd.path_mig_state;
- attr->qkey = cmd.qkey;
- attr->rq_psn = cmd.rq_psn;
- attr->sq_psn = cmd.sq_psn;
- attr->dest_qp_num = cmd.dest_qp_num;
- attr->qp_access_flags = cmd.qp_access_flags;
- attr->pkey_index = cmd.pkey_index;
- attr->alt_pkey_index = cmd.alt_pkey_index;
- attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
- attr->max_rd_atomic = cmd.max_rd_atomic;
- attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
- attr->min_rnr_timer = cmd.min_rnr_timer;
- attr->port_num = cmd.port_num;
- attr->timeout = cmd.timeout;
- attr->retry_cnt = cmd.retry_cnt;
- attr->rnr_retry = cmd.rnr_retry;
- attr->alt_port_num = cmd.alt_port_num;
- attr->alt_timeout = cmd.alt_timeout;
-
- memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
- attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
- attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
- attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
- attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
- attr->ah_attr.dlid = cmd.dest.dlid;
- attr->ah_attr.sl = cmd.dest.sl;
- attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
- attr->ah_attr.static_rate = cmd.dest.static_rate;
- attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
- attr->ah_attr.port_num = cmd.dest.port_num;
-
- memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
- attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
- attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
- attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
- attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
- attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
- attr->alt_ah_attr.sl = cmd.alt_dest.sl;
- attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
- attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
- attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
- attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
+ attr->qp_state = cmd->base.qp_state;
+ attr->cur_qp_state = cmd->base.cur_qp_state;
+ attr->path_mtu = cmd->base.path_mtu;
+ attr->path_mig_state = cmd->base.path_mig_state;
+ attr->qkey = cmd->base.qkey;
+ attr->rq_psn = cmd->base.rq_psn;
+ attr->sq_psn = cmd->base.sq_psn;
+ attr->dest_qp_num = cmd->base.dest_qp_num;
+ attr->qp_access_flags = cmd->base.qp_access_flags;
+ attr->pkey_index = cmd->base.pkey_index;
+ attr->alt_pkey_index = cmd->base.alt_pkey_index;
+ attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+ attr->max_rd_atomic = cmd->base.max_rd_atomic;
+ attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+ attr->min_rnr_timer = cmd->base.min_rnr_timer;
+ attr->port_num = cmd->base.port_num;
+ attr->timeout = cmd->base.timeout;
+ attr->retry_cnt = cmd->base.retry_cnt;
+ attr->rnr_retry = cmd->base.rnr_retry;
+ attr->alt_port_num = cmd->base.alt_port_num;
+ attr->alt_timeout = cmd->base.alt_timeout;
+ attr->rate_limit = cmd->rate_limit;
+
+ memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+ attr->ah_attr.grh.flow_label = cmd->base.dest.flow_label;
+ attr->ah_attr.grh.sgid_index = cmd->base.dest.sgid_index;
+ attr->ah_attr.grh.hop_limit = cmd->base.dest.hop_limit;
+ attr->ah_attr.grh.traffic_class = cmd->base.dest.traffic_class;
+ attr->ah_attr.dlid = cmd->base.dest.dlid;
+ attr->ah_attr.sl = cmd->base.dest.sl;
+ attr->ah_attr.src_path_bits = cmd->base.dest.src_path_bits;
+ attr->ah_attr.static_rate = cmd->base.dest.static_rate;
+ attr->ah_attr.ah_flags = cmd->base.dest.is_global ?
+ IB_AH_GRH : 0;
+ attr->ah_attr.port_num = cmd->base.dest.port_num;
+
+ memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+ attr->alt_ah_attr.grh.flow_label = cmd->base.alt_dest.flow_label;
+ attr->alt_ah_attr.grh.sgid_index = cmd->base.alt_dest.sgid_index;
+ attr->alt_ah_attr.grh.hop_limit = cmd->base.alt_dest.hop_limit;
+ attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+ attr->alt_ah_attr.dlid = cmd->base.alt_dest.dlid;
+ attr->alt_ah_attr.sl = cmd->base.alt_dest.sl;
+ attr->alt_ah_attr.src_path_bits = cmd->base.alt_dest.src_path_bits;
+ attr->alt_ah_attr.static_rate = cmd->base.alt_dest.static_rate;
+ attr->alt_ah_attr.ah_flags = cmd->base.alt_dest.is_global ?
+ IB_AH_GRH : 0;
+ attr->alt_ah_attr.port_num = cmd->base.alt_dest.port_num;
if (qp->real_qp == qp) {
- ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
- if (ret)
- goto release_qp;
+ if (cmd->base.attr_mask & IB_QP_AV) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+ if (ret)
+ goto release_qp;
+ }
ret = qp->device->modify_qp(qp, attr,
- modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ udata);
} else {
- ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ ret = ib_modify_qp(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask));
}
- if (ret)
- goto release_qp;
-
- ret = in_len;
-
release_qp:
put_qp_read(qp);
@@ -2425,6 +2419,68 @@ out:
return ret;
}
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_ex_modify_qp cmd = {};
+ struct ib_udata udata;
+ int ret;
+
+ if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+ return -EFAULT;
+
+ if (cmd.base.attr_mask &
+ ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+ return -EOPNOTSUPP;
+
+ INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+ in_len - sizeof(cmd.base), out_len);
+
+ ret = modify_qp(file, &cmd, &udata);
+ if (ret)
+ return ret;
+
+ return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_qp cmd = {};
+ int ret;
+
+ /*
+ * Last bit is reserved for extending the attr_mask by
+ * using another field.
+ */
+ BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+ if (ucore->inlen < sizeof(cmd.base))
+ return -EINVAL;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.base.attr_mask &
+ ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+ return -EOPNOTSUPP;
+
+ if (ucore->inlen > sizeof(cmd)) {
+ if (ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+ }
+
+ ret = modify_qp(file, &cmd, uhw);
+
+ return ret;
+}
+
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
const char __user *buf, int in_len,
@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct ib_ah *ah;
struct ib_ah_attr attr;
int ret;
+ struct ib_udata udata;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long)cmd.response + sizeof(resp),
+ in_len - sizeof(cmd), out_len - sizeof(resp));
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
memset(&attr.dmac, 0, sizeof(attr.dmac));
memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
- ah = ib_create_ah(pd, &attr);
+ ah = pd->device->create_ah(pd, &attr, &udata);
+
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_put;
}
+ ah->device = pd->device;
+ ah->pd = pd;
+ atomic_inc(&pd->usecnt);
ah->uobject = uobj;
uobj->object = ah;
@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr);
kern_spec_mask = kern_spec_val + kern_filter_sz;
+ if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+ return -EINVAL;
- switch (ib_spec->type) {
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
actual_filter_sz = spec_filter_size(kern_spec_mask,
@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+ memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+ if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+ (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = PTR_ERR(flow_id);
goto err_free;
}
- flow_id->qp = qp;
flow_id->uobject = uobj;
uobj->object = flow_id;
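
The new attr_mask screening in ib_uverbs_modify_qp()/ib_uverbs_ex_modify_qp() builds an "everything up to and including the last supported attribute" mask as (LAST << 1) - 1 and rejects any higher bit with -EOPNOTSUPP; the BUILD_BUG_ON exists because that shift would overflow if the last attribute ever landed on bit 31. A standalone sketch of the mask arithmetic, using a placeholder LAST_ATTR rather than the real IB_USER_LAST_QP_ATTR_MASK:

#include <stdio.h>

#define LAST_ATTR	(1u << 25)		/* placeholder: highest attribute bit we accept */
#define VALID_MASK	((LAST_ATTR << 1) - 1)	/* every bit from 0 up to LAST_ATTR */

static int check_attr_mask(unsigned int attr_mask)
{
	/* any bit above LAST_ATTR means userspace asked for something newer */
	if (attr_mask & ~VALID_MASK)
		return -1;	/* kernel would return -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	printf("%d\n", check_attr_mask(LAST_ATTR | 0x3));	/* accepted: 0 */
	printf("%d\n", check_attr_mask(LAST_ATTR << 1));	/* rejected: -1 */
	return 0;
}
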
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 44b1104eb168..b3f95d453fba 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -46,7 +46,7 @@
#include <linux/anon_inodes.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <rdma/ib.h>
@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+ [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -746,8 +747,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
- if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ if (!ib_safe_file_access(filp)) {
+ pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 83687646da68..71580cc28c9e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
struct ib_ah *ah;
- ah = pd->device->create_ah(pd, ah_attr);
+ ah = pd->device->create_ah(pd, ah_attr, NULL);
if (!IS_ERR(ah)) {
ah->device = pd->device;
@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
EXPORT_SYMBOL(ib_create_ah);
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
struct iphdr ip4h_checked;
@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
return 4;
return 6;
}
+EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
u8 port_num,
@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
if (rdma_protocol_ib(device, port_num))
return RDMA_NETWORK_IB;
- grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+ grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
if (grh_version == 4)
return RDMA_NETWORK_IPV4;
@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
&context, gid_index);
}
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
- enum rdma_network_type net_type,
- union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+ enum rdma_network_type net_type,
+ union ib_gid *sgid, union ib_gid *dgid)
{
struct sockaddr_in src_in;
struct sockaddr_in dst_in;
@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
return -EINVAL;
}
}
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
net_type = ib_get_net_type_by_grh(device, port_num, grh);
gid_type = ib_network_to_gid_type(net_type);
}
- ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
- &sgid, &dgid);
+ ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ &sgid, &dgid);
if (ret)
return ret;
@@ -1014,6 +1016,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
}
},
@@ -1047,6 +1050,7 @@ static const struct {
IB_QP_QKEY),
[IB_QPT_GSI] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
}
},
[IB_QPS_SQD] = {
@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
-int ib_resolve_eth_dmac(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+ struct ib_ah_attr *ah_attr)
{
int ret = 0;
- if (*qp_attr_mask & IB_QP_AV) {
- if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
- qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
- return -EINVAL;
-
- if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
- return 0;
-
- if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
- qp_attr->ah_attr.dmac);
- } else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(qp->device,
- qp_attr->ah_attr.port_num,
- qp_attr->ah_attr.grh.sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
+ if (ah_attr->port_num < rdma_start_port(device) ||
+ ah_attr->port_num > rdma_end_port(device))
+ return -EINVAL;
- ifindex = sgid_attr.ndev->ifindex;
+ if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+ return 0;
- ret = rdma_addr_find_l2_eth_by_grh(&sgid,
- &qp_attr->ah_attr.grh.dgid,
- qp_attr->ah_attr.dmac,
- NULL, &ifindex, &hop_limit);
+ if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+ ah_attr->dmac);
+ } else {
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+ int ifindex;
+ int hop_limit;
+
+ ret = ib_query_gid(device,
+ ah_attr->port_num,
+ ah_attr->grh.sgid_index,
+ &sgid, &sgid_attr);
+
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ goto out;
+ }
- dev_put(sgid_attr.ndev);
+ ifindex = sgid_attr.ndev->ifindex;
- qp_attr->ah_attr.grh.hop_limit = hop_limit;
- }
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+ &ah_attr->grh.dgid,
+ ah_attr->dmac,
+ NULL, &ifindex, &hop_limit);
+
+ dev_put(sgid_attr.ndev);
+
+ ah_attr->grh.hop_limit = hop_limit;
}
out:
return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
-
int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- int ret;
- ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
- if (ret)
- return ret;
+ if (qp_attr_mask & IB_QP_AV) {
+ int ret;
+
+ ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+ if (ret)
+ return ret;
+ }
return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOSYS);
flow_id = qp->device->create_flow(qp, flow_attr, domain);
- if (!IS_ERR(flow_id))
+ if (!IS_ERR(flow_id)) {
atomic_inc(&qp->usecnt);
+ flow_id->qp = qp;
+ }
return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
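
The ib_create_flow() change only takes the QP reference and sets flow_id->qp after IS_ERR() confirms the driver returned a real object, which is why the assignment could be dropped from uverbs_cmd.c above. A self-contained sketch of that success-only bookkeeping, with ERR_PTR/IS_ERR re-created for the demo:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct qp { int usecnt; };
struct flow { struct qp *qp; };

static struct flow a_flow;

static struct flow *create_flow(struct qp *qp, int fail)
{
	if (fail)
		return ERR_PTR(-12);	/* e.g. -ENOMEM */
	a_flow.qp = NULL;
	return &a_flow;
}

int main(void)
{
	struct qp qp = { 0 };
	struct flow *f = create_flow(&qp, 0);

	if (!IS_ERR(f)) {		/* only on success... */
		qp.usecnt++;		/* ...take the reference */
		f->qp = &qp;		/* ...and set the back-pointer */
	}
	printf("usecnt=%d\n", qp.usecnt);
	return 0;
}
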
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index e7a5ed9f6f3f..ed553de2ca12 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 8bca6b4ec9af..445e89e5e7cf 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,10 +45,9 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
int size = 32;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
m->len = size;
@@ -82,10 +81,9 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
size = npages * sizeof(u64);
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = pbl_addr;
m->len = size;
@@ -144,10 +142,9 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_PMRX;
m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
m->len = size;
@@ -177,10 +174,9 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
- if (!m) {
- PDBG("%s couldn't allocate memory.\n", __func__);
+ if (!m)
return;
- }
+
m->mem_id = MEM_CM;
m->addr = hwtid * size;
m->len = size;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index cba57bb53dba..9d5fe1853da4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -62,7 +62,8 @@
#include "common.h"
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4e5baf4fe15e..516b0ae6dc3f 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -828,8 +828,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
- if (!rdev->status_page)
+ if (!rdev->status_page) {
+ err = -ENOMEM;
goto destroy_ocqp_pool;
+ }
rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
@@ -841,8 +843,6 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
if (rdev->wr_log) {
rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
atomic_set(&rdev->wr_log_idx, 0);
- } else {
- pr_err(MOD "error allocating wr_log. Logging disabled\n");
}
}
@@ -1424,8 +1424,6 @@ static void recover_queues(struct uld_ctx *ctx)
qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
if (!qp_list.qps) {
- printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
- pci_name(ctx->lldi.pdev));
spin_unlock_irq(&ctx->dev->lock);
return;
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 645e606a17c5..49b51b7e0fd7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
return ERR_PTR(-ENOSYS);
}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b7ac97b27c88..cda5542e13a2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+ FW_RI_RES_WR_FBMAX_V(3)) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
- FW_RI_RES_WR_FBMAX_V(2) |
+ FW_RI_RES_WR_FBMAX_V(3) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 67ea85a56945..7a3d906b3671 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -125,6 +125,7 @@ int node_affinity_init(void)
cpumask_weight(topology_sibling_cpumask(
cpumask_first(&node_affinity.proc.mask)
));
+ node_affinity.num_possible_nodes = num_possible_nodes();
node_affinity.num_online_nodes = num_online_nodes();
node_affinity.num_online_cpus = num_online_cpus();
@@ -135,7 +136,7 @@ int node_affinity_init(void)
*/
init_real_cpu_mask();
- hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+ hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
if (!hfi1_per_node_cntr)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 42e63316afd1..e78c7aa094e0 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -70,14 +70,6 @@ struct cpu_mask_set {
uint gen;
};
-struct hfi1_affinity {
- struct cpu_mask_set def_intr;
- struct cpu_mask_set rcv_intr;
- struct cpumask real_cpu_mask;
- /* spin lock to protect affinity struct */
- spinlock_t lock;
-};
-
struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
@@ -115,6 +107,7 @@ struct hfi1_affinity_node_list {
struct cpumask real_cpu_mask;
struct cpu_mask_set proc;
int num_core_siblings;
+ int num_possible_nodes;
int num_online_nodes;
int num_online_cpus;
struct mutex lock; /* protects affinity nodes */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 24d0820873cf..ef72bc2a9e1d 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8477,7 +8477,10 @@ static int do_8051_command(
*/
if (type == HCMD_WRITE_LCB_CSR) {
in_data |= ((*out_data) & 0xffffffffffull) << 8;
- reg = ((((*out_data) >> 40) & 0xff) <<
+ /* must preserve COMPLETED - it is tied to hardware */
+ reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
+ reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
+ reg |= ((((*out_data) >> 40) & 0xff) <<
DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
| ((((*out_data) >> 48) & 0xffff) <<
DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
@@ -9556,11 +9559,11 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
- guid = ppd->guid;
+ guid = ppd->guids[HFI1_PORT_GUID_INDEX];
if (!guid) {
if (dd->base_guid)
guid = dd->base_guid + ppd->port - 1;
- ppd->guid = guid;
+ ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
}
/* Set linkinit_reason on power up per OPA spec */
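
The do_8051_command() fix above is a classic read-modify-write: it keeps the hardware-owned COMPLETED bit while rewriting the return-code and response-data fields. A user-space model of the same sequence; the masks and shifts are invented for the demo, not the DC_DC8051_CFG_EXT_DEV_0 layout:

#include <stdio.h>
#include <stdint.h>

#define COMPLETED_SMASK		0x1ull	/* bit the hardware owns */
#define RETURN_CODE_SHIFT	8
#define RSP_DATA_SHIFT		16

static uint64_t csr = 0x1;		/* pretend COMPLETED is currently set */

int main(void)
{
	uint64_t out_data = 0xaabb12ull << 40;	/* fields packed in the upper bits */
	uint64_t reg;

	reg = csr;				/* read */
	reg &= COMPLETED_SMASK;			/* preserve only the hardware bit */
	reg |= ((out_data >> 40) & 0xff) << RETURN_CODE_SHIFT
	     | ((out_data >> 48) & 0xffff) << RSP_DATA_SHIFT;
	csr = reg;				/* write back */

	printf("csr = 0x%llx\n", (unsigned long long)csr);
	return 0;
}
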
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5b9993899789..5bfa839d1c48 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -415,6 +415,9 @@
#define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
#define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
#define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
+#define ASIC_CFG_SCRATCH_1 (ASIC_CFG_SCRATCH + 0x08)
+#define ASIC_CFG_SCRATCH_2 (ASIC_CFG_SCRATCH + 0x10)
+#define ASIC_CFG_SCRATCH_3 (ASIC_CFG_SCRATCH + 0x18)
#define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
#define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
#define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 632ba21759ab..8725f4c086cf 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -541,6 +541,114 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
return ret;
}
+/* read the dc8051 memory */
+static ssize_t dc8051_memory_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ ssize_t rval;
+ void *tmp;
+ loff_t start, end;
+
+ /* the checks below expect the position to be positive */
+ if (*ppos < 0)
+ return -EINVAL;
+
+ tmp = kzalloc(DC8051_DATA_MEM_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * Fill in the requested portion of the temporary buffer from the
+ * 8051 memory. The 8051 memory read is done in terms of 8 bytes.
+ * Adjust start and end to fit. Skip reading anything if out of
+ * range.
+ */
+ start = *ppos & ~0x7; /* round down */
+ if (start < DC8051_DATA_MEM_SIZE) {
+ end = (*ppos + count + 7) & ~0x7; /* round up */
+ if (end > DC8051_DATA_MEM_SIZE)
+ end = DC8051_DATA_MEM_SIZE;
+ rval = read_8051_data(ppd->dd, start, end - start,
+ (u64 *)(tmp + start));
+ if (rval)
+ goto done;
+ }
+
+ rval = simple_read_from_buffer(buf, count, ppos, tmp,
+ DC8051_DATA_MEM_SIZE);
+done:
+ kfree(tmp);
+ return rval;
+}
+
+static ssize_t debugfs_lcb_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off;
+ u64 data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only read 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (read_lcb_csr(dd, csr_off, (u64 *)&data))
+ break; /* failed */
+ if (put_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ }
+ *ppos += total;
+ return total;
+}
+
+static ssize_t debugfs_lcb_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ struct hfi1_devdata *dd = ppd->dd;
+ unsigned long total, csr_off, data;
+
+ if (*ppos < 0)
+ return -EINVAL;
+ /* only write 8 byte quantities */
+ if ((count % 8) != 0)
+ return -EINVAL;
+ /* offset must be 8-byte aligned */
+ if ((*ppos % 8) != 0)
+ return -EINVAL;
+ /* do nothing if out of range or zero count */
+ if (*ppos >= (LCB_END - LCB_START) || !count)
+ return 0;
+ /* reduce count if needed */
+ if (*ppos + count > LCB_END - LCB_START)
+ count = (LCB_END - LCB_START) - *ppos;
+
+ csr_off = LCB_START + *ppos;
+ for (total = 0; total < count; total += 8, csr_off += 8) {
+ if (get_user(data, (unsigned long __user *)(buf + total)))
+ break;
+ if (write_lcb_csr(dd, csr_off, data))
+ break; /* failed */
+ }
+ *ppos += total;
+ return total;
+}
+
/*
* read the per-port QSFP data for ppd
*/
@@ -931,6 +1039,8 @@ static const struct counter_info port_cntr_ops[] = {
DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
qsfp2_debugfs_open, qsfp2_debugfs_release),
DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+ DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+ DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
};
static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos)
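
Both new debugfs handlers work on 8-byte quantities: dc8051_memory_read() widens the requested window to 8-byte boundaries and clamps it to the 8051 data memory, while the LCB handlers insist on aligned offsets and counts. A small sketch of the round-down/round-up/clamp arithmetic, with an arbitrary memory size standing in for DC8051_DATA_MEM_SIZE:

#include <stdio.h>

#define MEM_SIZE 8192

int main(void)
{
	long long pos = 13, count = 20;
	long long start, end;

	start = pos & ~0x7;			/* round the offset down to 8 bytes */
	end = (pos + count + 7) & ~0x7;		/* round the end up to 8 bytes */
	if (end > MEM_SIZE)
		end = MEM_SIZE;			/* never read past the 8051 memory */

	printf("read [%lld, %lld) to satisfy [%lld, %lld)\n",
	       start, end, pos, pos + count);
	return 0;
}
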
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index c5efff29c147..4fbaee68012b 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -795,8 +795,7 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index e70c223801b4..26da124c88e2 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -207,6 +207,40 @@ done_asic:
/* magic character sequence that trails an image */
#define IMAGE_TRAIL_MAGIC "egamiAPO"
+/* EPROM file types */
+#define HFI1_EFT_PLATFORM_CONFIG 2
+
+/* segment size - 128 KiB */
+#define SEG_SIZE (128 * 1024)
+
+struct hfi1_eprom_footer {
+ u32 oprom_size; /* size of the oprom, in bytes */
+ u16 num_table_entries;
+ u16 version; /* version of this footer */
+ u32 magic; /* must be last */
+};
+
+struct hfi1_eprom_table_entry {
+ u32 type; /* file type */
+ u32 offset; /* file offset from start of EPROM */
+ u32 size; /* file size, in bytes */
+};
+
+/*
+ * Calculate the max number of table entries that will fit within a directory
+ * buffer of size 'dir_size'.
+ */
+#define MAX_TABLE_ENTRIES(dir_size) \
+ (((dir_size) - sizeof(struct hfi1_eprom_footer)) / \
+ sizeof(struct hfi1_eprom_table_entry))
+
+#define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \
+ (sizeof(struct hfi1_eprom_table_entry) * (n)))
+
+#define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a))
+#define FOOTER_MAGIC MAGIC4('e', 'p', 'r', 'm')
+#define FOOTER_VERSION 1
+
/*
* Read all of partition 1. The actual file is at the front. Adjust
* the returned size if a trailing image magic is found.
@@ -242,6 +276,167 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
}
/*
+ * The segment magic has been checked. There is a footer and table of
+ * contents present.
+ *
+ * directory is a u32 aligned buffer of size EP_PAGE_SIZE.
+ */
+static int read_segment_platform_config(struct hfi1_devdata *dd,
+ void *directory, void **data, u32 *size)
+{
+ struct hfi1_eprom_footer *footer;
+ struct hfi1_eprom_table_entry *table;
+ struct hfi1_eprom_table_entry *entry;
+ void *buffer = NULL;
+ void *table_buffer = NULL;
+ int ret, i;
+ u32 directory_size;
+ u32 seg_base, seg_offset;
+ u32 bytes_available, ncopied, to_copy;
+
+ /* the footer is at the end of the directory */
+ footer = (struct hfi1_eprom_footer *)
+ (directory + EP_PAGE_SIZE - sizeof(*footer));
+
+ /* make sure the structure version is supported */
+ if (footer->version != FOOTER_VERSION)
+ return -EINVAL;
+
+ /* oprom size cannot be larger than a segment */
+ if (footer->oprom_size >= SEG_SIZE)
+ return -EINVAL;
+
+ /* the file table must fit in a segment with the oprom */
+ if (footer->num_table_entries >
+ MAX_TABLE_ENTRIES(SEG_SIZE - footer->oprom_size))
+ return -EINVAL;
+
+ /* find the file table start, which precedes the footer */
+ directory_size = DIRECTORY_SIZE(footer->num_table_entries);
+ if (directory_size <= EP_PAGE_SIZE) {
+ /* the file table fits into the directory buffer handed in */
+ table = (struct hfi1_eprom_table_entry *)
+ (directory + EP_PAGE_SIZE - directory_size);
+ } else {
+ /* need to allocate and read more */
+ table_buffer = kmalloc(directory_size, GFP_KERNEL);
+ if (!table_buffer)
+ return -ENOMEM;
+ ret = read_length(dd, SEG_SIZE - directory_size,
+ directory_size, table_buffer);
+ if (ret)
+ goto done;
+ table = table_buffer;
+ }
+
+ /* look for the platform configuration file in the table */
+ for (entry = NULL, i = 0; i < footer->num_table_entries; i++) {
+ if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) {
+ entry = &table[i];
+ break;
+ }
+ }
+ if (!entry) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /*
+ * Sanity check on the configuration file size - it should never
+ * be larger than 4 KiB.
+ */
+ if (entry->size > (4 * 1024)) {
+ dd_dev_err(dd, "Bad configuration file size 0x%x\n",
+ entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bogus offset and size that wrap when added together */
+ if (entry->offset + entry->size < entry->offset) {
+ dd_dev_err(dd,
+ "Bad configuration file start + size 0x%x+0x%x\n",
+ entry->offset, entry->size);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* allocate the buffer to return */
+ buffer = kmalloc(entry->size, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Extract the file by looping over segments until it is fully read.
+ */
+ seg_offset = entry->offset % SEG_SIZE;
+ seg_base = entry->offset - seg_offset;
+ ncopied = 0;
+ while (ncopied < entry->size) {
+ /* calculate data bytes available in this segment */
+
+ /* start with the bytes from the current offset to the end */
+ bytes_available = SEG_SIZE - seg_offset;
+ /* subtract off footer and table from segment 0 */
+ if (seg_base == 0) {
+ /*
+ * Sanity check: should not have a starting point
+ * at or within the directory.
+ */
+ if (bytes_available <= directory_size) {
+ dd_dev_err(dd,
+ "Bad configuration file - offset 0x%x within footer+table\n",
+ entry->offset);
+ ret = -EINVAL;
+ goto done;
+ }
+ bytes_available -= directory_size;
+ }
+
+ /* calculate bytes wanted */
+ to_copy = entry->size - ncopied;
+
+ /* max out at the available bytes in this segment */
+ if (to_copy > bytes_available)
+ to_copy = bytes_available;
+
+ /*
+ * Read from the EPROM.
+ *
+ * The sanity check for entry->offset is done in read_length().
+ * The EPROM offset is validated against what the hardware
+ * addressing supports. In addition, if the offset is larger
+ * than the actual EPROM, it silently wraps. It will work
+ * fine, though the reader may not get what they expected
+ * from the EPROM.
+ */
+ ret = read_length(dd, seg_base + seg_offset, to_copy,
+ buffer + ncopied);
+ if (ret)
+ goto done;
+
+ ncopied += to_copy;
+
+ /* set up for next segment */
+ seg_offset = footer->oprom_size;
+ seg_base += SEG_SIZE;
+ }
+
+ /* success */
+ ret = 0;
+ *data = buffer;
+ *size = entry->size;
+
+done:
+ kfree(table_buffer);
+ if (ret)
+ kfree(buffer);
+ return ret;
+}
+
+/*
* Read the platform configuration file from the EPROM.
*
* On success, an allocated buffer containing the data and its size are
@@ -253,6 +448,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
* -EBUSY - not able to acquire access to the EPROM
* -ENOENT - no recognizable file written
* -ENOMEM - buffer could not be allocated
+ * -EINVAL - invalid EPROM contents found
*/
int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
{
@@ -266,21 +462,20 @@ int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
if (ret)
return -EBUSY;
- /* read the last page of P0 for the EPROM format magic */
- ret = read_length(dd, P1_START - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
+ /* read the last page of the segment for the EPROM format magic */
+ ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
if (ret)
goto done;
- /* last dword of P0 contains a magic indicator */
- if (directory[EP_PAGE_DWORDS - 1] == 0) {
+ /* last dword of the segment contains a magic value */
+ if (directory[EP_PAGE_DWORDS - 1] == FOOTER_MAGIC) {
+ /* segment format */
+ ret = read_segment_platform_config(dd, directory, data, size);
+ } else {
/* partition format */
ret = read_partition_platform_config(dd, data, size);
- goto done;
}
- /* nothing recognized */
- ret = -ENOENT;
-
done:
release_chip_resource(dd, CR_EPROM);
return ret;
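
The segment-format reader above does its bookkeeping purely with offset arithmetic: segment 0 loses DIRECTORY_SIZE(n) bytes at its tail, every later segment loses oprom_size bytes at its head, and each pass of the copy loop is clamped to what remains in the current 128 KiB segment. A standalone sketch of that arithmetic; the file offset/size and the oprom/directory sizes below are invented inputs:

#include <stdio.h>

#define SEG_SIZE (128 * 1024)

int main(void)
{
	unsigned int offset = 120 * 1024, size = 20 * 1024;	/* file to extract */
	unsigned int oprom_size = 4096, directory_size = 256;
	unsigned int seg_offset = offset % SEG_SIZE;
	unsigned int seg_base = offset - seg_offset;
	unsigned int ncopied = 0;

	while (ncopied < size) {
		unsigned int avail = SEG_SIZE - seg_offset;
		unsigned int to_copy;

		if (seg_base == 0)
			avail -= directory_size;	/* footer+table sit at the end of segment 0 */

		to_copy = size - ncopied;
		if (to_copy > avail)
			to_copy = avail;

		printf("copy %u bytes from EPROM offset %u\n",
		       to_copy, seg_base + seg_offset);

		ncopied += to_copy;
		seg_offset = oprom_size;		/* later segments start after the oprom */
		seg_base += SEG_SIZE;
	}
	return 0;
}
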
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 13db8eb4f4ec..0dd50cdb039a 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -239,6 +239,16 @@ static const u8 all_fabric_serdes_broadcast = 0xe1;
const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
static const u8 all_pcie_serdes_broadcast = 0xe0;
+static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
+ 0,
+ SYSTEM_TABLE_MAX,
+ PORT_TABLE_MAX,
+ RX_PRESET_TABLE_MAX,
+ TX_PRESET_TABLE_MAX,
+ QSFP_ATTEN_TABLE_MAX,
+ VARIABLE_SETTINGS_TABLE_MAX
+};
+
/* forwards */
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
@@ -263,11 +273,13 @@ static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
u64 reg;
int count;
- /* start the read at the given address */
- reg = ((addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
- << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
- | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK;
+ /* step 1: set the address, clear enable */
+ reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+ << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+ /* step 2: enable */
+ write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
+ reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
/* wait until ACCESS_COMPLETED is set */
count = 0;
@@ -707,6 +719,9 @@ static int obtain_firmware(struct hfi1_devdata *dd)
&dd->pcidev->dev);
if (err) {
platform_config = NULL;
+ dd_dev_err(dd,
+ "%s: No default platform config file found\n",
+ __func__);
goto done;
}
dd->platform_config.data = platform_config->data;
@@ -1761,8 +1776,17 @@ int parse_platform_config(struct hfi1_devdata *dd)
u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
int ret = -EINVAL; /* assume failure */
+ /*
+ * For integrated devices that did not fall back to the default file,
+ * the SI tuning information for active channels is acquired from the
+ * scratch register bitmap, thus there is no platform config to parse.
+ * Skip parsing in these situations.
+ */
+ if (is_integrated(dd) && !platform_config_load)
+ return 0;
+
if (!dd->platform_config.data) {
- dd_dev_info(dd, "%s: Missing config file\n", __func__);
+ dd_dev_err(dd, "%s: Missing config file\n", __func__);
goto bail;
}
ptr = (u32 *)dd->platform_config.data;
@@ -1770,7 +1794,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
magic_num = *ptr;
ptr++;
if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
- dd_dev_info(dd, "%s: Bad config file\n", __func__);
+ dd_dev_err(dd, "%s: Bad config file\n", __func__);
goto bail;
}
@@ -1797,9 +1821,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
header1 = *ptr;
header2 = *(ptr + 1);
if (header1 != ~header2) {
- dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
- __func__, (ptr - (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
goto bail;
}
@@ -1841,11 +1865,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
table_length_dwords;
break;
default:
- dd_dev_info(dd,
- "%s: Unknown data table %d, offset %ld\n",
- __func__, table_type,
- (ptr - (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd,
+ "%s: Unknown data table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table = ptr;
@@ -1865,11 +1889,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
break;
default:
- dd_dev_info(dd,
- "%s: Unknown meta table %d, offset %ld\n",
- __func__, table_type,
- (ptr -
- (u32 *)dd->platform_config.data));
+ dd_dev_err(dd,
+ "%s: Unknown meta table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table_metadata =
@@ -1884,10 +1908,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
/* Jump the table */
ptr += table_length_dwords;
if (crc != *ptr) {
- dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
- __func__, (ptr -
- (u32 *)
- dd->platform_config.data));
+ dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+ __func__, (ptr -
+ (u32 *)dd->platform_config.data));
goto bail;
}
/* Jump the CRC DWORD */
@@ -1901,6 +1924,84 @@ bail:
return ret;
}
+static void get_integrated_platform_config_field(
+ struct hfi1_devdata *dd,
+ enum platform_config_table_type_encoding table_type,
+ int field_index, u32 *data)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u8 *cache = ppd->qsfp_info.cache;
+ u32 tx_preset = 0;
+
+ switch (table_type) {
+ case PLATFORM_CONFIG_SYSTEM_TABLE:
+ if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
+ *data = ppd->max_power_class;
+ else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
+ *data = ppd->default_atten;
+ break;
+ case PLATFORM_CONFIG_PORT_TABLE:
+ if (field_index == PORT_TABLE_PORT_TYPE)
+ *data = ppd->port_type;
+ else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
+ *data = ppd->local_atten;
+ else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
+ *data = ppd->remote_atten;
+ break;
+ case PLATFORM_CONFIG_RX_PRESET_TABLE:
+ if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
+ QSFP_RX_CDR_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
+ QSFP_RX_EMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
+ QSFP_RX_AMP_APPLY_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
+ *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
+ QSFP_RX_CDR_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
+ *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
+ QSFP_RX_EMP_SHIFT;
+ else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
+ *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
+ QSFP_RX_AMP_SHIFT;
+ break;
+ case PLATFORM_CONFIG_TX_PRESET_TABLE:
+ if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
+ tx_preset = ppd->tx_preset_eq;
+ else
+ tx_preset = ppd->tx_preset_noeq;
+ if (field_index == TX_PRESET_TABLE_PRECUR)
+ *data = (tx_preset & TX_PRECUR_SMASK) >>
+ TX_PRECUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_ATTN)
+ *data = (tx_preset & TX_ATTN_SMASK) >>
+ TX_ATTN_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_POSTCUR)
+ *data = (tx_preset & TX_POSTCUR_SMASK) >>
+ TX_POSTCUR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
+ *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
+ QSFP_TX_CDR_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
+ *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
+ QSFP_TX_EQ_APPLY_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
+ *data = (tx_preset & QSFP_TX_CDR_SMASK) >>
+ QSFP_TX_CDR_SHIFT;
+ else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
+ *data = (tx_preset & QSFP_TX_EQ_SMASK) >>
+ QSFP_TX_EQ_SHIFT;
+ break;
+ case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+ case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+ default:
+ break;
+ }
+}
+
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
int field, u32 *field_len_bits,
u32 *field_start_bits)
@@ -1976,6 +2077,15 @@ int get_platform_config_field(struct hfi1_devdata *dd,
else
return -EINVAL;
+ if (is_integrated(dd) && !platform_config_load) {
+ /*
+ * Use saved configuration from ppd for integrated platforms
+ */
+ get_integrated_platform_config_field(dd, table_type,
+ field_index, data);
+ return 0;
+ }
+
ret = get_platform_fw_field_metadata(dd, table_type, field_index,
&field_len_bits,
&field_start_bits);
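
When is_integrated() is true and no config file was loaded, the driver pulls each tuning field out of the packed per-port words with the (value & SMASK) >> SHIFT idiom seen throughout get_integrated_platform_config_field(). A minimal sketch of that extraction; the mask and shift values are placeholders, not the real QSFP_* definitions:

#include <stdio.h>
#include <stdint.h>

#define RX_CDR_APPLY_SHIFT	0
#define RX_CDR_APPLY_SMASK	(0x1u << RX_CDR_APPLY_SHIFT)
#define RX_EMP_SHIFT		4
#define RX_EMP_SMASK		(0xfu << RX_EMP_SHIFT)

int main(void)
{
	uint32_t rx_preset = 0x31;	/* packed per-port tuning word */

	printf("cdr_apply = %u\n",
	       (unsigned)((rx_preset & RX_CDR_APPLY_SMASK) >> RX_CDR_APPLY_SHIFT));
	printf("emp       = %u\n",
	       (unsigned)((rx_preset & RX_EMP_SMASK) >> RX_EMP_SHIFT));
	return 0;
}
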
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cc87fd4e534b..751a0fb29fa5 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -492,6 +492,9 @@ struct rvt_sge_state;
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8
+#define HFI1_GUIDS_PER_PORT 5
+#define HFI1_PORT_GUID_INDEX 0
+
static inline void incr_cntr64(u64 *cntr)
{
if (*cntr < (u64)-1LL)
@@ -559,11 +562,20 @@ struct hfi1_pportdata {
struct kobject vl2mtu_kobj;
/* PHY support */
- u32 port_type;
struct qsfp_data qsfp_info;
+ /* Values for SI tuning of SerDes */
+ u32 port_type;
+ u32 tx_preset_eq;
+ u32 tx_preset_noeq;
+ u32 rx_preset;
+ u8 local_atten;
+ u8 remote_atten;
+ u8 default_atten;
+ u8 max_power_class;
+
+ /* GUIDs for this interface, in host order, guids[0] is a port guid */
+ u64 guids[HFI1_GUIDS_PER_PORT];
- /* GUID for this interface, in host order */
- u64 guid;
/* GUID for peer interface, in host order */
u64 neighbor_guid;
@@ -826,32 +838,29 @@ struct hfi1_devdata {
u8 __iomem *kregend;
/* physical address of chip for io_remap, etc. */
resource_size_t physaddr;
- /* receive context data */
- struct hfi1_ctxtdata **rcd;
+ /* Per VL data. Enough for all VLs but not all elements are set/used. */
+ struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
/* send context data */
struct send_context_info *send_contexts;
/* map hardware send contexts to software index */
u8 *hw_to_sw;
/* spinlock for allocating and releasing send context resources */
spinlock_t sc_lock;
- /* Per VL data. Enough for all VLs but not all elements are set/used. */
- struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
/* lock for pio_map */
spinlock_t pio_map_lock;
+ /* Send Context initialization lock. */
+ spinlock_t sc_init_lock;
+ /* lock for sdma_map */
+ spinlock_t sde_map_lock;
/* array of kernel send contexts */
struct send_context **kernel_send_context;
/* array of vl maps */
struct pio_vl_map __rcu *pio_map;
- /* seqlock for sc2vl */
- seqlock_t sc2vl_lock;
- u64 sc2vl[4];
- /* Send Context initialization lock. */
- spinlock_t sc_init_lock;
+ /* default flags to last descriptor */
+ u64 default_desc1;
/* fields common to all SDMA engines */
- /* default flags to last descriptor */
- u64 default_desc1;
volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
dma_addr_t sdma_heads_phys;
void *sdma_pad_dma; /* DMA'ed by chip */
@@ -862,8 +871,6 @@ struct hfi1_devdata {
u32 chip_sdma_engines;
/* num used */
u32 num_sdma;
- /* lock for sdma_map */
- spinlock_t sde_map_lock;
/* array of engines sized by num_sdma */
struct sdma_engine *per_sdma;
/* array of vl maps */
@@ -872,14 +879,11 @@ struct hfi1_devdata {
wait_queue_head_t sdma_unfreeze_wq;
atomic_t sdma_unfreeze_count;
+ u32 lcb_access_count; /* count of LCB users */
+
/* common data between shared ASIC HFIs in this OS */
struct hfi1_asic_data *asic_data;
- /* hfi1_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct hfi1_pportdata *pport;
-
/* mem-mapped pointer to base of PIO buffers */
void __iomem *piobase;
/*
@@ -896,20 +900,13 @@ struct hfi1_devdata {
/* send context numbers and sizes for each type */
struct sc_config_sizes sc_sizes[SC_MAX];
- u32 lcb_access_count; /* count of LCB users */
-
char *boardname; /* human readable board info */
- /* device (not port) flags, basically device capabilities */
- u32 flags;
-
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
u64 z_send_schedule;
- /* percpu int_counter */
- u64 __percpu *int_counter;
- u64 __percpu *rcv_limit;
+
u64 __percpu *send_schedule;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
@@ -924,6 +921,7 @@ struct hfi1_devdata {
/* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
+ u32 freezelen; /* max length of freezemsg */
u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
@@ -945,7 +943,6 @@ struct hfi1_devdata {
* IB link status cheaply
*/
struct hfi1_status *status;
- u32 freezelen; /* max length of freezemsg */
/* revision register shadow */
u64 revision;
@@ -973,6 +970,8 @@ struct hfi1_devdata {
u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
+ /* default link down value (poll/sleep) */
+ u8 link_default;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
@@ -1008,8 +1007,6 @@ struct hfi1_devdata {
u8 hfi1_id;
/* implementation code */
u8 icode;
- /* default link down value (poll/sleep) */
- u8 link_default;
/* vAU of this device */
u8 vau;
/* vCU of this device */
@@ -1020,27 +1017,17 @@ struct hfi1_devdata {
u16 vl15_init;
/* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
u8 n_krcv_queues;
u8 qos_shift;
- u8 qpn_mask;
- u16 rhf_offset; /* offset of RHF within receive header entry */
u16 irev; /* implementation revision */
u16 dc8051_ver; /* 8051 firmware version */
+ spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
struct platform_config platform_config;
struct platform_config_cache pcfg_cache;
struct diag_client *diag_client;
- spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
/* MSI-X information */
struct hfi1_msix_entry *msix_entries;
@@ -1055,6 +1042,9 @@ struct hfi1_devdata {
struct rcv_array_data rcv_entries;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+
/*
* 64 bit synthetic counters
*/
@@ -1085,11 +1075,11 @@ struct hfi1_devdata {
struct err_info_rcvport err_info_rcvport;
struct err_info_constraint err_info_rcv_constraint;
struct err_info_constraint err_info_xmit_constraint;
- u8 err_info_uncorrectable;
- u8 err_info_fmconfig;
atomic_t drop_packet;
u8 do_drop;
+ u8 err_info_uncorrectable;
+ u8 err_info_fmconfig;
/*
* Software counters for the status bits defined by the
@@ -1112,40 +1102,60 @@ struct hfi1_devdata {
u64 sw_cce_err_status_aggregate;
/* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
- /* receive interrupt functions */
- rhf_rcv_function_ptr *rhf_rcv_function_map;
+ /* receive interrupt function */
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
+ /* Save the enabled LCB error bits */
+ u64 lcb_err_en;
+
/*
* Capability to have different send engines simply by changing a
* pointer value.
*/
- send_routine process_pio_send;
+ send_routine process_pio_send ____cacheline_aligned_in_smp;
send_routine process_dma_send;
void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
+ /* hfi1_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct hfi1_pportdata *pport;
+ /* receive context data */
+ struct hfi1_ctxtdata **rcd;
+ u64 __percpu *int_counter;
+ /* device (not port) flags, basically device capabilities */
+ u16 flags;
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ /* adding a new field here would make it part of this cacheline */
+
+ /* seqlock for sc2vl */
+ seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
+ u64 sc2vl[4];
+ /* receive interrupt functions */
+ rhf_rcv_function_ptr *rhf_rcv_function_map;
+ u64 __percpu *rcv_limit;
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+ /* adding a new field here would make it part of this cacheline */
/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
u8 oui1;
u8 oui2;
u8 oui3;
+ u8 dc_shutdown;
+
/* Timer and counter used to detect RcvBufOvflCnt changes */
struct timer_list rcverr_timer;
- u32 rcv_ovfl_cnt;
wait_queue_head_t event_queue;
- /* Save the enabled LCB error bits */
- u64 lcb_err_en;
- u8 dc_shutdown;
-
/* receive context tail dummy address */
__le64 *rcvhdrtail_dummy_kvaddr;
dma_addr_t rcvhdrtail_dummy_dma;
- bool eprom_available; /* true if EPROM is available for this device */
- bool aspm_supported; /* Does HW support ASPM */
- bool aspm_enabled; /* ASPM state: enabled/disabled */
+ u32 rcv_ovfl_cnt;
/* Serialize ASPM enable/disable between multiple verbs contexts */
spinlock_t aspm_lock;
/* Number of verbs contexts which have disabled ASPM */
@@ -1155,8 +1165,11 @@ struct hfi1_devdata {
/* Used to wait for outstanding user space clients before dev removal */
struct completion user_comp;
- struct hfi1_affinity *affinity;
+ bool eprom_available; /* true if EPROM is available for this device */
+ bool aspm_supported; /* Does HW support ASPM */
+ bool aspm_enabled; /* ASPM state: enabled/disabled */
struct rhashtable sdma_rht;
+
struct kobject kobj;
};
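
The field shuffle above is a cache-line layout pass: members read together on the send and receive fast paths are grouped onto their own 64-byte lines, away from cold configuration state, and the "adding a new field here" comments mark where each hot line ends. A minimal userspace sketch of the idea, with an invented struct, an illustrative field subset, and __attribute__((aligned(64))) standing in for the kernel's ____cacheline_aligned_in_smp:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define CACHELINE 64

/* hot send-path fields grouped into one aligned line */
struct send_hot {
        void (*process_pio_send)(void);
        void (*process_dma_send)(void);
        void *pport;
        uint16_t flags;
        uint8_t num_pports;
        uint8_t first_user_ctxt;
} __attribute__((aligned(CACHELINE)));

/* hot receive-path fields on their own line */
struct recv_hot {
        void *rhf_rcv_function_map;
        uint64_t *rcv_limit;
        uint16_t rhf_offset;
} __attribute__((aligned(CACHELINE)));

struct devdata_sketch {
        /* cold, init-time fields can share lines freely */
        uint64_t revision;
        uint32_t lbus_width;
        struct send_hot send;
        struct recv_hot recv;
};

int main(void)
{
        /* both offsets come out as multiples of the cache-line size */
        printf("send line at offset %zu, recv line at offset %zu\n",
               offsetof(struct devdata_sketch, send),
               offsetof(struct devdata_sketch, recv));
        return 0;
}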
@@ -1604,6 +1617,17 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
}
/*
+ * Return the indexed GUID from the port GUIDs table.
+ */
+static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ WARN_ON(index >= HFI1_GUIDS_PER_PORT);
+ return cpu_to_be64(ppd->guids[index]);
+}
+
+/*
* Called by readers of cc_state only, must call under rcu_read_lock().
*/
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
@@ -1982,6 +2006,12 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
return i2c_target(dd->hfi1_id);
}
+/* Is this device integrated or discrete? */
+static inline bool is_integrated(struct hfi1_devdata *dd)
+{
+ return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
+}
+
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
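
get_sguid() above turns GUID access into a plain bounds-checked table lookup: the hardware port GUID now lives in the same ppd->guids[] array as the writable GUIDs, at HFI1_PORT_GUID_INDEX, so callers no longer special-case index 0. A standalone sketch of that lookup; the table size and index value are assumed here for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define GUIDS_PER_PORT   5      /* assumed table size */
#define PORT_GUID_INDEX  0      /* assumed slot of the hardware port GUID */

struct port_sketch {
        uint64_t guids[GUIDS_PER_PORT];
};

static uint64_t get_sguid(const struct port_sketch *p, unsigned int index)
{
        assert(index < GUIDS_PER_PORT);  /* the driver uses WARN_ON() */
        return p->guids[index];          /* the driver converts to __be64 */
}

int main(void)
{
        struct port_sketch p = { .guids = { 0x0011750100000001ULL } };

        printf("port GUID: %#llx\n",
               (unsigned long long)get_sguid(&p, PORT_GUID_INDEX));
        return 0;
}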
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef38d389..d9740ddea6f1 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -64,6 +64,7 @@ struct sdma_engine;
/**
* struct iowait - linkage for delayed progress/waiting
* @list: used to add/insert into QP/PQ wait lists
+ * @lock: used to record the list head lock
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
* @wakeup: space callback wakeup
@@ -91,6 +92,11 @@ struct sdma_engine;
* so sleeping is not allowed.
*
* The wait_dma member along with the iow
+ *
+ * The lock field is used by waiters to record
+ * the seqlock_t that guards the list head.
+ * Waiters explicitly know that, but the destroy
+ * code that unwaits QPs does not.
*/
struct iowait {
@@ -103,6 +109,7 @@ struct iowait {
unsigned seq);
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
+ seqlock_t *lock;
struct work_struct iowork;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
@@ -141,6 +148,7 @@ static inline void iowait_init(
void (*sdma_drained)(struct iowait *wait))
{
wait->count = 0;
+ wait->lock = NULL;
INIT_LIST_HEAD(&wait->list);
INIT_LIST_HEAD(&wait->tx_head);
INIT_WORK(&wait->iowork, func);
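
The new iowait.lock member records which wait-list lock a QP was queued under, so teardown code that does not know the list can still take the right seqlock and dequeue it (see flush_iowait() further down in this patch). A minimal userspace sketch of the pattern, with a pthread mutex standing in for the kernel seqlock and invented names:

#include <pthread.h>
#include <stdio.h>

struct waiter {
        int queued;
        pthread_mutex_t *lock;  /* lock guarding the list we sit on, or NULL */
};

/* enqueue path: remember the guarding lock along with the enqueue */
static void enqueue(struct waiter *w, pthread_mutex_t *list_lock)
{
        pthread_mutex_lock(list_lock);
        w->queued = 1;
        w->lock = list_lock;
        pthread_mutex_unlock(list_lock);
}

/* teardown path: works without knowing which list the waiter used */
static void flush(struct waiter *w)
{
        pthread_mutex_t *lock = w->lock;

        if (!lock)              /* never queued: nothing to do */
                return;
        pthread_mutex_lock(lock);
        if (w->queued) {
                w->queued = 0;
                w->lock = NULL;
        }
        pthread_mutex_unlock(lock);
}

int main(void)
{
        pthread_mutex_t dmawait_lock = PTHREAD_MUTEX_INITIALIZER;
        struct waiter w = { 0, NULL };

        enqueue(&w, &dmawait_lock);
        flush(&w);
        printf("queued=%d lock=%p\n", w.queued, (void *)w.lock);
        return 0;
}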
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 9487c9bb8920..6e595afca24c 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -128,7 +128,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
smp = send_buf->mad;
smp->base_version = OPA_MGMT_BASE_VERSION;
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = OPA_SMI_CLASS_VERSION;
+ smp->class_version = OPA_SM_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
ibp->rvp.tid++;
smp->tid = cpu_to_be64(ibp->rvp.tid);
@@ -336,20 +336,20 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
ni = (struct opa_node_info *)data;
/* GUID 0 is illegal */
- if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
+ if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
- ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+ ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
ni->base_version = OPA_MGMT_BASE_VERSION;
- ni->class_version = OPA_SMI_CLASS_VERSION;
+ ni->class_version = OPA_SM_CLASS_VERSION;
ni->node_type = 1; /* channel adapter */
ni->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
ni->system_image_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- ni->node_guid = cpu_to_be64(dd->pport->guid);
+ ni->node_guid = ibdev->node_guid;
ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
ni->device_id = cpu_to_be16(dd->pcidev->device);
ni->revision = cpu_to_be32(dd->minrev);
@@ -373,19 +373,20 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* GUID 0 is illegal */
if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
+ ibdev->node_guid == 0 ||
+ get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+ return reply((struct ib_mad_hdr *)smp);
+ }
+ nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
nip->base_version = OPA_MGMT_BASE_VERSION;
- nip->class_version = OPA_SMI_CLASS_VERSION;
+ nip->class_version = OPA_SM_CLASS_VERSION;
nip->node_type = 1; /* channel adapter */
nip->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
nip->sys_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- nip->node_guid = cpu_to_be64(dd->pport->guid);
+ nip->node_guid = ibdev->node_guid;
nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
nip->device_id = cpu_to_be16(dd->pcidev->device);
nip->revision = cpu_to_be32(dd->minrev);
@@ -2302,7 +2303,7 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
p->base_version = OPA_MGMT_BASE_VERSION;
- p->class_version = OPA_SMI_CLASS_VERSION;
+ p->class_version = OPA_SM_CLASS_VERSION;
/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
*/
@@ -4022,7 +4023,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
am = be32_to_cpu(smp->attr_mod);
attr_id = smp->attr_id;
- if (smp->class_version != OPA_SMI_CLASS_VERSION) {
+ if (smp->class_version != OPA_SM_CLASS_VERSION) {
smp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_mad_hdr *)smp);
return ret;
@@ -4232,7 +4233,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
*out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
+ if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
return reply((struct ib_mad_hdr *)pmp);
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 7ad30898fc19..ccbf52c8ff6f 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -81,7 +81,7 @@ static void do_remove(struct mmu_rb_handler *handler,
struct list_head *del_list);
static void handle_remove(struct work_struct *work);
-static struct mmu_notifier_ops mn_opts = {
+static const struct mmu_notifier_ops mn_opts = {
.invalidate_page = mmu_notifier_page,
.invalidate_range_start = mmu_notifier_range_start,
};
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b8745d4c1..615be68e40b3 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -758,6 +758,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
sc->hw_context = hw_context;
cr_group_addresses(sc, &dma);
sc->credits = sci->credits;
+ sc->size = sc->credits * PIO_BLOCK_SIZE;
/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
@@ -1242,6 +1243,7 @@ int sc_enable(struct send_context *sc)
sc->free = 0;
sc->alloc_free = 0;
sc->fill = 0;
+ sc->fill_wrap = 0;
sc->sr_head = 0;
sc->sr_tail = 0;
sc->flags = 0;
@@ -1385,7 +1387,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
unsigned long flags;
unsigned long avail;
unsigned long blocks = dwords_to_blocks(dw_len);
- unsigned long start_fill;
+ u32 fill_wrap;
int trycount = 0;
u32 head, next;
@@ -1410,9 +1412,7 @@ retry:
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
sc_release_update(sc);
- spin_lock_irqsave(&sc->alloc_lock, flags);
sc->alloc_free = ACCESS_ONCE(sc->free);
trycount++;
goto retry;
@@ -1428,8 +1428,11 @@ retry:
head = sc->sr_head;
/* "allocate" the buffer */
- start_fill = sc->fill;
sc->fill += blocks;
+ fill_wrap = sc->fill_wrap;
+ sc->fill_wrap += blocks;
+ if (sc->fill_wrap >= sc->credits)
+ sc->fill_wrap = sc->fill_wrap - sc->credits;
/*
* Fill the parts that the releaser looks at before moving the head.
@@ -1458,11 +1461,8 @@ retry:
spin_unlock_irqrestore(&sc->alloc_lock, flags);
/* finish filling in the buffer outside the lock */
- pbuf->start = sc->base_addr + ((start_fill % sc->credits)
- * PIO_BLOCK_SIZE);
- pbuf->size = sc->credits * PIO_BLOCK_SIZE;
- pbuf->end = sc->base_addr + pbuf->size;
- pbuf->block_count = blocks;
+ pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
+ pbuf->end = sc->base_addr + sc->size;
pbuf->qw_written = 0;
pbuf->carry_bytes = 0;
pbuf->carry.val64 = 0;
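
The new fill_wrap counter keeps an already-wrapped block offset alongside the ever-increasing fill count, so the buffer start can be computed with one multiply instead of the modulo that start_fill % sc->credits used to require. A standalone sketch of the bookkeeping; the block size and ring size are made-up values, and like sc_buffer_alloc() it assumes a single allocation never exceeds the ring size:

#include <stdio.h>
#include <stdint.h>

#define PIO_BLOCK_SIZE 64       /* illustrative block size, in bytes */

struct ctx {
        uint64_t fill;          /* monotonically increasing alloc count */
        uint32_t fill_wrap;     /* fill, pre-wrapped to [0, credits) */
        uint32_t credits;       /* ring size in blocks */
};

/* returns the byte offset of the allocated buffer within the ring */
static uint32_t alloc_blocks(struct ctx *c, uint32_t blocks)
{
        uint32_t start = c->fill_wrap;

        c->fill += blocks;
        c->fill_wrap += blocks;
        if (c->fill_wrap >= c->credits) /* single conditional, no '%' */
                c->fill_wrap -= c->credits;
        return start * PIO_BLOCK_SIZE;
}

int main(void)
{
        struct ctx c = { .fill = 0, .fill_wrap = 0, .credits = 10 };
        uint32_t i;

        for (i = 0; i < 6; i++)
                printf("alloc 4 blocks -> offset %u\n", alloc_blocks(&c, 4));
        return 0;
}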
@@ -1573,6 +1573,7 @@ static void sc_piobufavail(struct send_context *sc)
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
/* refcount held until actual wake up */
qps[n++] = qp;
}
@@ -2028,29 +2029,17 @@ freesc15:
int init_credit_return(struct hfi1_devdata *dd)
{
int ret;
- int num_numa;
int i;
- num_numa = num_online_nodes();
- /* enforce the expectation that the numas are compact */
- for (i = 0; i < num_numa; i++) {
- if (!node_online(i)) {
- dd_dev_err(dd, "NUMA nodes are not compact\n");
- ret = -EINVAL;
- goto done;
- }
- }
-
dd->cr_base = kcalloc(
- num_numa,
+ node_affinity.num_possible_nodes,
sizeof(struct credit_return_base),
GFP_KERNEL);
if (!dd->cr_base) {
- dd_dev_err(dd, "Unable to allocate credit return base\n");
ret = -ENOMEM;
goto done;
}
- for (i = 0; i < num_numa; i++) {
+ for_each_node_with_cpus(i) {
int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
set_dev_node(&dd->pcidev->dev, i);
@@ -2077,14 +2066,11 @@ done:
void free_credit_return(struct hfi1_devdata *dd)
{
- int num_numa;
int i;
if (!dd->cr_base)
return;
-
- num_numa = num_online_nodes();
- for (i = 0; i < num_numa; i++) {
+ for (i = 0; i < node_affinity.num_possible_nodes; i++) {
if (dd->cr_base[i].va) {
dma_free_coherent(&dd->pcidev->dev,
TXE_NUM_CONTEXTS *
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index e709eaf743b5..867e5ffc3595 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -83,53 +83,55 @@ struct pio_buf {
void *arg; /* argument for cb */
void __iomem *start; /* buffer start address */
void __iomem *end; /* context end address */
- unsigned long size; /* context size, in bytes */
unsigned long sent_at; /* buffer is sent when <= free */
- u32 block_count; /* size of buffer, in blocks */
- u32 qw_written; /* QW written so far */
- u32 carry_bytes; /* number of valid bytes in carry */
union mix carry; /* pending unwritten bytes */
+ u16 qw_written; /* QW written so far */
+ u8 carry_bytes; /* number of valid bytes in carry */
};
/* cache line aligned pio buffer array */
union pio_shadow_ring {
struct pio_buf pbuf;
- u64 unused[16]; /* cache line spacer */
} ____cacheline_aligned;
/* per-NUMA send context */
struct send_context {
/* read-only after init */
struct hfi1_devdata *dd; /* device */
- void __iomem *base_addr; /* start of PIO memory */
union pio_shadow_ring *sr; /* shadow ring */
+ void __iomem *base_addr; /* start of PIO memory */
+ u32 __percpu *buffers_allocated;/* count of buffers allocated */
+ u32 size; /* context size, in bytes */
- volatile __le64 *hw_free; /* HW free counter */
- struct work_struct halt_work; /* halted context work queue entry */
- unsigned long flags; /* flags */
int node; /* context home node */
- int type; /* context type */
- u32 sw_index; /* software index number */
- u32 hw_context; /* hardware context number */
- u32 credits; /* number of blocks in context */
u32 sr_size; /* size of the shadow ring */
- u32 group; /* credit return group */
+ u16 flags; /* flags */
+ u8 type; /* context type */
+ u8 sw_index; /* software index number */
+ u8 hw_context; /* hardware context number */
+ u8 group; /* credit return group */
+
/* allocator fields */
spinlock_t alloc_lock ____cacheline_aligned_in_smp;
+ u32 sr_head; /* shadow ring head */
unsigned long fill; /* official alloc count */
unsigned long alloc_free; /* copy of free (less cache thrash) */
- u32 sr_head; /* shadow ring head */
+ u32 fill_wrap; /* tracks fill within ring */
+ u32 credits; /* number of blocks in context */
+ /* adding a new field here would make it part of this cacheline */
+
/* releaser fields */
spinlock_t release_lock ____cacheline_aligned_in_smp;
- unsigned long free; /* official free count */
u32 sr_tail; /* shadow ring tail */
+ unsigned long free; /* official free count */
+ volatile __le64 *hw_free; /* HW free counter */
/* list for PIO waiters */
struct list_head piowait ____cacheline_aligned_in_smp;
spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
- u64 credit_ctrl; /* cache for credit control */
u32 credit_intr_count; /* count of credit intr users */
- u32 __percpu *buffers_allocated;/* count of buffers allocated */
+ u64 credit_ctrl; /* cache for credit control */
wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
+ struct work_struct halt_work; /* halted context work queue entry */
};
/* send context flags */
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index aa7773643107..03024cec78dd 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -129,8 +129,8 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -361,8 +361,8 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -458,8 +458,8 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -492,7 +492,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
*/
/* adjust if we have wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to the SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
@@ -584,8 +584,8 @@ static void mid_copy_straight(struct pio_buf *pbuf,
dest += sizeof(u64);
}
- dest -= pbuf->size;
- dend -= pbuf->size;
+ dest -= pbuf->sc->size;
+ dend -= pbuf->sc->size;
}
/* write 8-byte non-SOP, non-wrap chunk data */
@@ -666,7 +666,7 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
*/
/* adjust if we've wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
@@ -719,7 +719,7 @@ void seg_pio_copy_end(struct pio_buf *pbuf)
*/
/* adjust if we have wrapped */
if (dest >= pbuf->end)
- dest -= pbuf->size;
+ dest -= pbuf->sc->size;
/* jump to the SOP range if within the first block */
else if (pbuf->qw_written < PIO_BLOCK_QWS)
dest += SOP_DISTANCE;
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 202433178864..838fe84e285a 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -49,6 +49,90 @@
#include "efivar.h"
#include "eprom.h"
+static int validate_scratch_checksum(struct hfi1_devdata *dd)
+{
+ u64 checksum = 0, temp_scratch = 0;
+ int i, j, version;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
+
+ /* Prevent power on default of all zeroes from passing checksum */
+ if (!version)
+ return 0;
+
+ /*
+ * ASIC scratch 0 only contains the checksum and bitmap version as
+ * fields of interest, both of which are handled separately from the
+ * loop below, so skip it
+ */
+ checksum += version;
+ for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
+ for (j = sizeof(u64); j != 0; j -= 2) {
+ checksum += (temp_scratch & 0xFFFF);
+ temp_scratch >>= 16;
+ }
+ }
+
+ while (checksum >> 16)
+ checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+ temp_scratch &= CHECKSUM_SMASK;
+ temp_scratch >>= CHECKSUM_SHIFT;
+
+ if (checksum + temp_scratch == 0xFFFF)
+ return 1;
+ return 0;
+}
+
+static void save_platform_config_fields(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 temp_scratch = 0, temp_dest = 0;
+
+ temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
+ PORT0_PORT_TYPE_SMASK);
+ ppd->port_type = temp_dest >>
+ (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
+ PORT0_PORT_TYPE_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
+ PORT0_LOCAL_ATTEN_SMASK);
+ ppd->local_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
+ PORT0_LOCAL_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
+ PORT0_REMOTE_ATTEN_SMASK);
+ ppd->remote_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
+ PORT0_REMOTE_ATTEN_SHIFT);
+
+ temp_dest = temp_scratch &
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
+ PORT0_DEFAULT_ATTEN_SMASK);
+ ppd->default_atten = temp_dest >>
+ (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
+ PORT0_DEFAULT_ATTEN_SHIFT);
+
+ temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
+ ASIC_CFG_SCRATCH_2);
+
+ ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
+ ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
+ ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
+
+ ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
+ QSFP_MAX_POWER_SHIFT;
+}
+
void get_platform_config(struct hfi1_devdata *dd)
{
int ret = 0;
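
validate_scratch_checksum() above folds the scratch registers as 16-bit words with end-around carry and accepts the bitmap when the computed sum plus the stored checksum equals 0xFFFF, i.e. the stored value is the one's complement of the fold. A standalone sketch of the same arithmetic over an in-memory array; the register layout and sample values are simplified for illustration:

#include <stdio.h>
#include <stdint.h>

/* fold one 64-bit value into a running 16-bit sum, four chunks at a time */
static uint64_t fold16(uint64_t sum, uint64_t v)
{
        int j;

        for (j = 0; j < 4; j++) {
                sum += v & 0xFFFF;
                v >>= 16;
        }
        return sum;
}

int main(void)
{
        uint64_t scratch[4] = { 0, 0x1234, 0xdeadbeef, 0x0bad0dad00c0ffeeULL };
        uint64_t sum = 0;
        unsigned int i;
        uint16_t stored;

        for (i = 0; i < 4; i++)
                sum = fold16(sum, scratch[i]);
        while (sum >> 16)                       /* end-around carry */
                sum = (sum & 0xFFFF) + (sum >> 16);

        stored = 0xFFFF - (uint16_t)sum;        /* what firmware would store */
        printf("sum=%#llx stored=%#x valid=%d\n",
               (unsigned long long)sum, stored,
               ((uint16_t)sum + stored) == 0xFFFF);
        return 0;
}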
@@ -56,38 +140,49 @@ void get_platform_config(struct hfi1_devdata *dd)
u8 *temp_platform_config = NULL;
u32 esize;
- ret = eprom_read_platform_config(dd, (void **)&temp_platform_config,
- &esize);
- if (!ret) {
- /* success */
- size = esize;
- goto success;
+ if (is_integrated(dd)) {
+ if (validate_scratch_checksum(dd)) {
+ save_platform_config_fields(dd);
+ return;
+ }
+ dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
+ __func__);
+ dd_dev_err(dd,
+ "%s: Please update your BIOS to support active channels\n",
+ __func__);
+ } else {
+ ret = eprom_read_platform_config(dd,
+ (void **)&temp_platform_config,
+ &esize);
+ if (!ret) {
+ /* success */
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = esize;
+ return;
+ }
+ /* fail, try EFI variable */
+
+ ret = read_hfi1_efi_var(dd, "configuration", &size,
+ (void **)&temp_platform_config);
+ if (!ret) {
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = size;
+ return;
+ }
}
- /* fail, try EFI variable */
-
- ret = read_hfi1_efi_var(dd, "configuration", &size,
- (void **)&temp_platform_config);
- if (!ret)
- goto success;
-
- dd_dev_info(dd,
- "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
- __func__);
+ dd_dev_err(dd,
+ "%s: Failed to get platform config, falling back to sub-optimal default file\n",
+ __func__);
/* fall back to request firmware */
platform_config_load = 1;
- return;
-
-success:
- dd->platform_config.data = temp_platform_config;
- dd->platform_config.size = size;
}
void free_platform_config(struct hfi1_devdata *dd)
{
if (!platform_config_load) {
/*
- * was loaded from EFI, release memory
- * allocated by read_efi_var
+ * was loaded from EFI or the EPROM, release memory
+ * allocated by read_efi_var/eprom_read_platform_config
*/
kfree(dd->platform_config.data);
}
@@ -100,12 +195,16 @@ void free_platform_config(struct hfi1_devdata *dd)
void get_port_type(struct hfi1_pportdata *ppd)
{
int ret;
+ u32 temp;
ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_PORT_TYPE, &ppd->port_type,
+ PORT_TABLE_PORT_TYPE, &temp,
4);
- if (ret)
+ if (ret) {
ppd->port_type = PORT_TYPE_UNKNOWN;
+ return;
+ }
+ ppd->port_type = temp;
}
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
@@ -538,6 +637,38 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
}
}
+/*
+ * Return a special SerDes setting for low power AOC cables. The power class
+ * threshold and setting being used were all found by empirical testing.
+ *
+ * Summary of the logic:
+ *
+ * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
+ * return 0xe
+ * return 0; // leave at default
+ */
+static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ int power_class;
+
+ /* QSFP only */
+ if (ppd->port_type != PORT_TYPE_QSFP)
+ return 0; /* leave at default */
+
+ /* active optical cables only */
+ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+ case 0x0 ... 0x9: /* fallthrough */
+ case 0xC: /* fallthrough */
+ case 0xE:
+ /* active AOC */
+ power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+ if (power_class < QSFP_POWER_CLASS_4)
+ return 0xe;
+ }
+ return 0; /* leave at default */
+}
+
static void apply_tunings(
struct hfi1_pportdata *ppd, u32 tx_preset_index,
u8 tuning_method, u32 total_atten, u8 limiting_active)
@@ -606,7 +737,17 @@ static void apply_tunings(
tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
postcur = tx_preset;
- config_data = precur | (attn << 8) | (postcur << 16);
+ /*
+ * NOTES:
+ * o The aoc_low_power_setting is applied to all lanes even
+ * though only lane 0's value is examined by the firmware.
+ * o A lingering low power setting after a cable swap does
+ * not occur. On cable unplug the 8051 is reset and
+ * restarted on cable insert. This resets all settings to
+ * their default, erasing any previous low power setting.
+ */
+ config_data = precur | (attn << 8) | (postcur << 16) |
+ (aoc_low_power_setting(ppd) << 24);
apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
"Applying TX settings");
diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index e2c21613c326..eed0aa9124fa 100644
--- a/drivers/infiniband/hw/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -168,16 +168,6 @@ struct platform_config_cache {
struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
};
-static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
- 0,
- SYSTEM_TABLE_MAX,
- PORT_TABLE_MAX,
- RX_PRESET_TABLE_MAX,
- TX_PRESET_TABLE_MAX,
- QSFP_ATTEN_TABLE_MAX,
- VARIABLE_SETTINGS_TABLE_MAX
-};
-
/* This section defines default values and encodings for the
* fields defined for each table above
*/
@@ -295,6 +285,123 @@ enum link_tuning_encoding {
OPA_UNKNOWN_TUNING
};
+/*
+ * Shifts and masks for the link SI tuning values stuffed into the ASIC scratch
+ * registers for integrated platforms
+ */
+#define PORT0_PORT_TYPE_SHIFT 0
+#define PORT0_LOCAL_ATTEN_SHIFT 4
+#define PORT0_REMOTE_ATTEN_SHIFT 10
+#define PORT0_DEFAULT_ATTEN_SHIFT 32
+
+#define PORT1_PORT_TYPE_SHIFT 16
+#define PORT1_LOCAL_ATTEN_SHIFT 20
+#define PORT1_REMOTE_ATTEN_SHIFT 26
+#define PORT1_DEFAULT_ATTEN_SHIFT 40
+
+#define PORT0_PORT_TYPE_MASK 0xFUL
+#define PORT0_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT0_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT0_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT1_PORT_TYPE_MASK 0xFUL
+#define PORT1_LOCAL_ATTEN_MASK 0x3FUL
+#define PORT1_REMOTE_ATTEN_MASK 0x3FUL
+#define PORT1_DEFAULT_ATTEN_MASK 0xFFUL
+
+#define PORT0_PORT_TYPE_SMASK (PORT0_PORT_TYPE_MASK << \
+ PORT0_PORT_TYPE_SHIFT)
+#define PORT0_LOCAL_ATTEN_SMASK (PORT0_LOCAL_ATTEN_MASK << \
+ PORT0_LOCAL_ATTEN_SHIFT)
+#define PORT0_REMOTE_ATTEN_SMASK (PORT0_REMOTE_ATTEN_MASK << \
+ PORT0_REMOTE_ATTEN_SHIFT)
+#define PORT0_DEFAULT_ATTEN_SMASK (PORT0_DEFAULT_ATTEN_MASK << \
+ PORT0_DEFAULT_ATTEN_SHIFT)
+
+#define PORT1_PORT_TYPE_SMASK (PORT1_PORT_TYPE_MASK << \
+ PORT1_PORT_TYPE_SHIFT)
+#define PORT1_LOCAL_ATTEN_SMASK (PORT1_LOCAL_ATTEN_MASK << \
+ PORT1_LOCAL_ATTEN_SHIFT)
+#define PORT1_REMOTE_ATTEN_SMASK (PORT1_REMOTE_ATTEN_MASK << \
+ PORT1_REMOTE_ATTEN_SHIFT)
+#define PORT1_DEFAULT_ATTEN_SMASK (PORT1_DEFAULT_ATTEN_MASK << \
+ PORT1_DEFAULT_ATTEN_SHIFT)
+
+#define QSFP_MAX_POWER_SHIFT 0
+#define TX_NO_EQ_SHIFT 4
+#define TX_EQ_SHIFT 25
+#define RX_SHIFT 46
+
+#define QSFP_MAX_POWER_MASK 0xFUL
+#define TX_NO_EQ_MASK 0x1FFFFFUL
+#define TX_EQ_MASK 0x1FFFFFUL
+#define RX_MASK 0xFFFFUL
+
+#define QSFP_MAX_POWER_SMASK (QSFP_MAX_POWER_MASK << \
+ QSFP_MAX_POWER_SHIFT)
+#define TX_NO_EQ_SMASK (TX_NO_EQ_MASK << TX_NO_EQ_SHIFT)
+#define TX_EQ_SMASK (TX_EQ_MASK << TX_EQ_SHIFT)
+#define RX_SMASK (RX_MASK << RX_SHIFT)
+
+#define TX_PRECUR_SHIFT 0
+#define TX_ATTN_SHIFT 4
+#define QSFP_TX_CDR_APPLY_SHIFT 9
+#define QSFP_TX_EQ_APPLY_SHIFT 10
+#define QSFP_TX_CDR_SHIFT 11
+#define QSFP_TX_EQ_SHIFT 12
+#define TX_POSTCUR_SHIFT 16
+
+#define TX_PRECUR_MASK 0xFUL
+#define TX_ATTN_MASK 0x1FUL
+#define QSFP_TX_CDR_APPLY_MASK 0x1UL
+#define QSFP_TX_EQ_APPLY_MASK 0x1UL
+#define QSFP_TX_CDR_MASK 0x1UL
+#define QSFP_TX_EQ_MASK 0xFUL
+#define TX_POSTCUR_MASK 0x1FUL
+
+#define TX_PRECUR_SMASK (TX_PRECUR_MASK << TX_PRECUR_SHIFT)
+#define TX_ATTN_SMASK (TX_ATTN_MASK << TX_ATTN_SHIFT)
+#define QSFP_TX_CDR_APPLY_SMASK (QSFP_TX_CDR_APPLY_MASK << \
+ QSFP_TX_CDR_APPLY_SHIFT)
+#define QSFP_TX_EQ_APPLY_SMASK (QSFP_TX_EQ_APPLY_MASK << \
+ QSFP_TX_EQ_APPLY_SHIFT)
+#define QSFP_TX_CDR_SMASK (QSFP_TX_CDR_MASK << QSFP_TX_CDR_SHIFT)
+#define QSFP_TX_EQ_SMASK (QSFP_TX_EQ_MASK << QSFP_TX_EQ_SHIFT)
+#define TX_POSTCUR_SMASK (TX_POSTCUR_MASK << TX_POSTCUR_SHIFT)
+
+#define QSFP_RX_CDR_APPLY_SHIFT 0
+#define QSFP_RX_EMP_APPLY_SHIFT 1
+#define QSFP_RX_AMP_APPLY_SHIFT 2
+#define QSFP_RX_CDR_SHIFT 3
+#define QSFP_RX_EMP_SHIFT 4
+#define QSFP_RX_AMP_SHIFT 8
+
+#define QSFP_RX_CDR_APPLY_MASK 0x1UL
+#define QSFP_RX_EMP_APPLY_MASK 0x1UL
+#define QSFP_RX_AMP_APPLY_MASK 0x1UL
+#define QSFP_RX_CDR_MASK 0x1UL
+#define QSFP_RX_EMP_MASK 0xFUL
+#define QSFP_RX_AMP_MASK 0x3UL
+
+#define QSFP_RX_CDR_APPLY_SMASK (QSFP_RX_CDR_APPLY_MASK << \
+ QSFP_RX_CDR_APPLY_SHIFT)
+#define QSFP_RX_EMP_APPLY_SMASK (QSFP_RX_EMP_APPLY_MASK << \
+ QSFP_RX_EMP_APPLY_SHIFT)
+#define QSFP_RX_AMP_APPLY_SMASK (QSFP_RX_AMP_APPLY_MASK << \
+ QSFP_RX_AMP_APPLY_SHIFT)
+#define QSFP_RX_CDR_SMASK (QSFP_RX_CDR_MASK << QSFP_RX_CDR_SHIFT)
+#define QSFP_RX_EMP_SMASK (QSFP_RX_EMP_MASK << QSFP_RX_EMP_SHIFT)
+#define QSFP_RX_AMP_SMASK (QSFP_RX_AMP_MASK << QSFP_RX_AMP_SHIFT)
+
+#define BITMAP_VERSION 1
+#define BITMAP_VERSION_SHIFT 44
+#define BITMAP_VERSION_MASK 0xFUL
+#define BITMAP_VERSION_SMASK (BITMAP_VERSION_MASK << \
+ BITMAP_VERSION_SHIFT)
+#define CHECKSUM_SHIFT 48
+#define CHECKSUM_MASK 0xFFFFUL
+#define CHECKSUM_SMASK (CHECKSUM_MASK << CHECKSUM_SHIFT)
+
/* platform.c */
void get_platform_config(struct hfi1_devdata *dd);
void free_platform_config(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9fc75e7e8781..d752d6768a49 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -196,15 +196,18 @@ static void flush_tx_list(struct rvt_qp *qp)
static void flush_iowait(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
unsigned long flags;
+ seqlock_t *lock = priv->s_iowait.lock;
- write_seqlock_irqsave(&dev->iowait_lock, flags);
+ if (!lock)
+ return;
+ write_seqlock_irqsave(lock, flags);
if (!list_empty(&priv->s_iowait.list)) {
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
}
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ write_sequnlock_irqrestore(lock, flags);
}
static inline int opa_mtu_enum_to_int(int mtu)
@@ -543,6 +546,7 @@ static int iowait_sleep(
ibp->rvp.n_dmawait++;
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
rvt_get_qp(qp);
}
@@ -964,6 +968,7 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8a8797..809b26eb6d3c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -276,7 +276,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
rvt_get_mr(ps->s_txreq->mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
if (len > pmtu) {
len = pmtu;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
@@ -290,7 +290,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
bth2 = mask_psn(qp->s_ack_rdma_psn++);
} else {
/* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
+ ps->s_txreq->ss = NULL;
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
ohdr->u.at.aeth = hfi1_compute_aeth(qp);
@@ -306,7 +306,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
/* FALLTHROUGH */
case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ ps->s_txreq->ss = &qp->s_ack_rdma_sge;
ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
if (ps->s_txreq->mr)
rvt_get_mr(ps->s_txreq->mr);
@@ -335,7 +335,7 @@ normal:
*/
qp->s_ack_state = OP(SEND_ONLY);
qp->s_flags &= ~RVT_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
+ ps->s_txreq->ss = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
@@ -351,7 +351,7 @@ normal:
qp->s_rdma_ack_cnt++;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_size = len;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
/* pbc */
ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
@@ -801,8 +801,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_len -= len;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
+ ps->s_txreq->ss = ss;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(
qp,
ohdr,
@@ -1146,8 +1146,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
u32 opcode;
u32 psn;
@@ -1195,22 +1193,8 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
qp->s_last = s_last;
/* see post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_put_swqe(wqe);
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
}
/*
* If we were waiting for sends to complete before re-sending,
@@ -1240,9 +1224,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
struct rvt_swqe *wqe,
struct hfi1_ibport *ibp)
{
- struct ib_wc wc;
- unsigned i;
-
lockdep_assert_held(&qp->s_lock);
/*
* Don't decrement refcount and don't generate a
@@ -1253,28 +1234,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
s_last = qp->s_last;
if (++s_last >= qp->s_size)
s_last = 0;
qp->s_last = s_last;
/* see post_send() */
barrier();
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
} else {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -2295,7 +2262,7 @@ send_last:
hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -2410,8 +2377,7 @@ send_last:
* Update the next expected PSN. We add 1 later
* below, so only add the remainder here.
*/
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
+ qp->r_psn += rvt_div_mtu(qp, len - 1);
} else {
e->rdma_sge.mr = NULL;
e->rdma_sge.vaddr = NULL;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a1576aea4756..717ed4b159d3 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -239,16 +239,6 @@ bail:
return ret;
}
-static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- return cpu_to_be64(ppd->guid);
- }
- return ibp->guids[index - 1];
-}
-
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
return (gid->global.interface_id == id &&
@@ -699,9 +689,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
/* The SGID is 32-bit aligned. */
hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
hdr->sgid.global.interface_id =
- grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
- ibp->guids[grh->sgid_index - 1] :
- cpu_to_be64(ppd_from_ibp(ibp)->guid);
+ grh->sgid_index < HFI1_GUIDS_PER_PORT ?
+ get_sguid(ibp, grh->sgid_index) :
+ get_sguid(ibp, HFI1_PORT_GUID_INDEX);
hdr->dgid = grh->dgid;
/* GRH header size in 32-bit words. */
@@ -777,8 +767,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth1;
/* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ extra_bytes = -ps->s_txreq->s_cur_size & 3;
+ nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
lrh0 = HFI1_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
qp->s_hdrwords += hfi1_make_grh(ibp,
@@ -952,7 +942,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
- unsigned i;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
@@ -964,32 +953,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
qp->s_last = last;
/* See post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
+ rvt_qp_swqe_complete(qp, wqe, status);
if (qp->s_acked == old_last)
qp->s_acked = last;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 9cbe52d21077..1d81cac1fa6c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sde->head_sn, tx->sn);
sde->head_sn++;
#endif
- sdma_txclean(sde->dd, tx);
+ __sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
if (wait && iowait_sdma_dec(wait))
@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
}
/**
- * sdma_txclean() - clean tx of mappings, descp *kmalloc's
+ * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
* @dd: hfi1_devdata for unmapping
* @tx: tx request to clean
*
@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
* The code can be called multiple times without issue.
*
*/
-void sdma_txclean(
+void __sdma_txclean(
struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
@@ -3065,7 +3065,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
tx->descp[i] = tx->descs[i];
return 0;
enomem:
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOMEM;
}
@@ -3094,14 +3094,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return rval;
}
/* If coalesce buffer is allocated, copy data into it */
if (tx->coalesce_buf) {
if (type == SDMA_MAP_NONE) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -EINVAL;
}
@@ -3109,7 +3109,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
kvaddr = kmap(page);
kvaddr += offset;
} else if (WARN_ON(!kvaddr)) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -EINVAL;
}
@@ -3139,7 +3139,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
@@ -3181,7 +3181,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return rval;
}
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 56257ea3598f..21f1e2834f37 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -667,7 +667,13 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
int type, void *kvaddr, struct page *page,
unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
-void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+
+static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+ if (tx->num_desc)
+ __sdma_txclean(dd, tx);
+}
/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
@@ -753,7 +759,7 @@ static inline int sdma_txadd_page(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
@@ -834,7 +840,7 @@ static inline int sdma_txadd_kvaddr(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
+ __sdma_txclean(dd, tx);
return -ENOSPC;
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5e6d1bac4914..b141a78ae38b 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -258,8 +258,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_len -= len;
qp->s_hdrwords = hwords;
ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
+ ps->s_txreq->ss = &qp->s_sge;
+ ps->s_txreq->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
mask_psn(qp->s_psn++), middle, ps);
/* pbc */
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 97ae24b6314c..c071955c0272 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -354,8 +354,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
+ ps->s_txreq->s_cur_size = wqe->length;
+ ps->s_txreq->ss = &qp->s_sge;
qp->s_srate = ah_attr->static_rate;
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
qp->s_wqe = wqe;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d690f3e..7d22f8ee98ef 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -115,6 +115,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_HCRC_LOWER_MASK 0xff
#define AHG_KDETH_INTR_SHIFT 12
+#define AHG_KDETH_SH_SHIFT 13
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
@@ -144,8 +145,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_OM_LARGE 64
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
-/* Last packet in the request */
-#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
+/* Tx request flag bits */
+#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
+#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
@@ -943,8 +945,13 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
tx->busycount = 0;
INIT_LIST_HEAD(&tx->list);
+ /*
+ * For the last packet set the ACK request
+ * and disable header suppression.
+ */
if (req->seqnum == req->info.npkts - 1)
- tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
+ tx->flags |= (TXREQ_FLAGS_REQ_ACK |
+ TXREQ_FLAGS_REQ_DISABLE_SH);
/*
* Calculate the payload size - this is min of the fragment
@@ -963,11 +970,22 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
}
datalen = compute_data_length(req, tx);
+
+ /*
+ * Disable header suppression when the payload is <= 8 DWs.
+ * If there is an uncorrectable error in the receive
+ * data FIFO when the received payload size is less than
+ * or equal to 8 DWs, RxDmaDataFifoRdUncErr is not
+ * reported. Instead, RHF.EccErr is set if the header
+ * is not suppressed.
+ */
if (!datalen) {
SDMA_DBG(req,
"Request has data but pkt len is 0");
ret = -EFAULT;
goto free_tx;
+ } else if (datalen <= 32) {
+ tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
}
}
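
With the flag split above, the last packet of a request sets both the ACK bit and the header-suppression disable, while any packet whose payload is 8 DWs (32 bytes) or less disables suppression on its own so the FIFO error is still surfaced through RHF.EccErr. A standalone sketch of the flag decision; the helper and its output are illustrative, not driver code:

#include <stdio.h>
#include <stdint.h>

#define TXREQ_FLAGS_REQ_ACK         (1u << 0)   /* set BTH.A in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH  (1u << 1)   /* clear KDETH.SH */

static uint16_t txreq_flags(uint32_t seqnum, uint32_t npkts, uint32_t datalen)
{
        uint16_t flags = 0;

        if (seqnum == npkts - 1)        /* last packet of the request */
                flags |= TXREQ_FLAGS_REQ_ACK | TXREQ_FLAGS_REQ_DISABLE_SH;
        if (datalen && datalen <= 32)   /* payload <= 8 DWs */
                flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
        return flags;
}

int main(void)
{
        printf("mid pkt, 64B payload: %#x\n", txreq_flags(3, 10, 64));
        printf("mid pkt, 16B payload: %#x\n", txreq_flags(3, 10, 16));
        printf("last pkt            : %#x\n", txreq_flags(9, 10, 64));
        return 0;
}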
@@ -990,6 +1008,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
LRH2PBC(lrhlen);
tx->hdr.pbc[0] = cpu_to_le16(pbclen);
}
+ ret = check_header_template(req, &tx->hdr,
+ lrhlen, datalen);
+ if (ret)
+ goto free_tx;
ret = sdma_txinit_ahg(&tx->txreq,
SDMA_TXREQ_F_AHG_COPY,
sizeof(tx->hdr) + datalen,
@@ -1351,7 +1373,7 @@ static int set_txreq_header(struct user_sdma_request *req,
req->seqnum));
/* Set ACK request on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
hdr->bth[2] |= cpu_to_be32(1UL << 31);
/* Set the new offset */
@@ -1384,8 +1406,8 @@ static int set_txreq_header(struct user_sdma_request *req,
/* Set KDETH.TID based on value for this TID */
KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
EXP_TID_GET(tidval, IDX));
- /* Clear KDETH.SH only on the last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ /* Clear KDETH.SH when DISABLE_SH flag is set */
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
/*
* Set the KDETH.OFFSET and KDETH.OM based on size of
@@ -1429,7 +1451,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
/* BTH.PSN and BTH.A */
val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
@@ -1468,19 +1490,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
((req->tidoffset / req->omfactor) & 0x7fff)));
- /* KDETH.TIDCtrl, KDETH.TID */
+ /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
- (EXP_TID_GET(tidval, IDX) & 0x3ff));
- /* Clear KDETH.SH on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
- val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
- INTR) <<
- AHG_KDETH_INTR_SHIFT);
- val &= cpu_to_le16(~(1U << 13));
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+ (EXP_TID_GET(tidval, IDX) & 0x3ff));
+
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
+ val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
} else {
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
+ val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
+ cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
+ cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT));
}
+
+ AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
}
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 4b7a16ceb362..95ed4d6da510 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -297,22 +297,6 @@ static inline int wss_exceeds_threshold(void)
}
/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
- [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
- [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
- [IB_WR_REG_MR] = IB_WC_REG_MR
-};
-
-/*
* Length of header by opcode, 0 --> not supported
*/
const u8 hdr_len_by_opcode[256] = {
@@ -694,6 +678,7 @@ static void mem_timer(unsigned long data)
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
/* refcount held until actual wake up */
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -769,6 +754,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
mod_timer(&dev->mem_timer, jiffies + 1);
qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
rvt_get_qp(qp);
}
@@ -788,10 +774,10 @@ static int wait_kmem(struct hfi1_ibdev *dev,
*/
static noinline int build_verbs_ulp_payload(
struct sdma_engine *sde,
- struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx)
{
+ struct rvt_sge_state *ss = tx->ss;
struct rvt_sge *sg_list = ss->sg_list;
struct rvt_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
@@ -835,7 +821,6 @@ bail_txadd:
/* New API */
static int build_verbs_tx_desc(
struct sdma_engine *sde,
- struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx,
struct hfi1_ahg_info *ahg_info,
@@ -879,9 +864,9 @@ static int build_verbs_tx_desc(
goto bail_txadd;
}
- /* add the ulp payload - if any. ss can be NULL for acks */
- if (ss)
- ret = build_verbs_ulp_payload(sde, ss, length, tx);
+ /* add the ulp payload - if any. tx->ss can be NULL for acks */
+ if (tx->ss)
+ ret = build_verbs_ulp_payload(sde, length, tx);
bail_txadd:
return ret;
}
@@ -892,8 +877,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ahg_info *ahg_info = priv->s_ahg;
u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
+ u32 len = ps->s_txreq->s_cur_size;
u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
struct hfi1_ibdev *dev = ps->dev;
struct hfi1_pportdata *ppd = ps->ppd;
@@ -918,7 +902,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
plen);
}
tx->wqe = qp->s_wqe;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
+ ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
if (unlikely(ret))
goto bail_build;
}
@@ -980,6 +964,7 @@ static int pio_wait(struct rvt_qp *qp,
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
+ priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
rvt_get_qp(qp);
/* counting: only call wantpiobuf_intr if first user */
@@ -1008,8 +993,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
{
struct hfi1_qp_priv *priv = qp->priv;
u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
+ struct rvt_sge_state *ss = ps->s_txreq->ss;
+ u32 len = ps->s_txreq->s_cur_size;
u32 dwords = (len + 3) >> 2;
u32 plen = hdrwords + dwords + 2; /* includes pbc */
struct hfi1_pportdata *ppd = ps->ppd;
@@ -1237,7 +1222,7 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
u8 op = get_opcode(h);
if (piothreshold &&
- qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
+ tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
(BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq))
@@ -1483,15 +1468,11 @@ static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
int guid_index, __be64 *guid)
{
struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- if (guid_index == 0)
- *guid = cpu_to_be64(ppd->guid);
- else if (guid_index < HFI1_GUIDS_PER_PORT)
- *guid = ibp->guids[guid_index - 1];
- else
+ if (guid_index >= HFI1_GUIDS_PER_PORT)
return -EINVAL;
+ *guid = get_sguid(ibp, guid_index);
return 0;
}
@@ -1610,6 +1591,154 @@ static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
dc8051_ver_min(ver));
}
+static const char * const driver_cntr_names[] = {
+ /* must be element 0 */
+ "DRIVER_KernIntr",
+ "DRIVER_ErrorIntr",
+ "DRIVER_Tx_Errs",
+ "DRIVER_Rcv_Errs",
+ "DRIVER_HW_Errs",
+ "DRIVER_NoPIOBufs",
+ "DRIVER_CtxtsOpen",
+ "DRIVER_RcvLen_Errs",
+ "DRIVER_EgrBufFull",
+ "DRIVER_EgrHdrFull"
+};
+
+static const char **dev_cntr_names;
+static const char **port_cntr_names;
+static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+static int num_dev_cntrs;
+static int num_port_cntrs;
+static int cntr_names_initialized;
+
+/*
+ * Convert a list of names separated by '\n' into an array of NULL terminated
+ * strings. Optionally some entries can be reserved in the array to hold extra
+ * external strings.
+ */
+static int init_cntr_names(const char *names_in,
+ const int names_len,
+ int num_extra_names,
+ int *num_cntrs,
+ const char ***cntr_names)
+{
+ char *names_out, *p, **q;
+ int i, n;
+
+ n = 0;
+ for (i = 0; i < names_len; i++)
+ if (names_in[i] == '\n')
+ n++;
+
+ names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
+ GFP_KERNEL);
+ if (!names_out) {
+ *num_cntrs = 0;
+ *cntr_names = NULL;
+ return -ENOMEM;
+ }
+
+ p = names_out + (n + num_extra_names) * sizeof(char *);
+ memcpy(p, names_in, names_len);
+
+ q = (char **)names_out;
+ for (i = 0; i < n; i++) {
+ q[i] = p;
+ p = strchr(p, '\n');
+ *p++ = '\0';
+ }
+
+ *num_cntrs = n;
+ *cntr_names = (const char **)names_out;
+ return 0;
+}
+
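
init_cntr_names() above makes a single allocation that holds both the pointer array (with num_extra_names spare slots for the driver counters) and a copy of the name blob, then splits the copy in place at each '\n'. A userspace rendition of the same parsing, assuming well-formed input in which every name ends with a newline:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int init_cntr_names(const char *in, size_t len, int extra,
                           int *num, const char ***out)
{
        char *buf, *p, **q;
        int i, n = 0;

        for (i = 0; i < (int)len; i++)
                if (in[i] == '\n')
                        n++;

        /* one block: (n + extra) pointers followed by the string data */
        buf = malloc((n + extra) * sizeof(char *) + len);
        if (!buf)
                return -1;
        p = buf + (n + extra) * sizeof(char *);
        memcpy(p, in, len);

        q = (char **)buf;
        for (i = 0; i < n; i++) {
                q[i] = p;
                p = strchr(p, '\n');
                *p++ = '\0';    /* terminate each name in place */
        }

        *num = n;
        *out = (const char **)buf;
        return 0;
}

int main(void)
{
        const char names[] = "RxPkts\nTxPkts\nRxErrs\n";
        const char **cntrs;
        int i, n;

        if (init_cntr_names(names, sizeof(names) - 1, 2, &n, &cntrs))
                return 1;
        for (i = 0; i < n; i++)
                printf("counter %d: %s\n", i, cntrs[i]);
        free((void *)cntrs);
        return 0;
}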
+static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ int i, err;
+
+ if (!cntr_names_initialized) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ err = init_cntr_names(dd->cntrnames,
+ dd->cntrnameslen,
+ num_driver_cntrs,
+ &num_dev_cntrs,
+ &dev_cntr_names);
+ if (err)
+ return NULL;
+
+ for (i = 0; i < num_driver_cntrs; i++)
+ dev_cntr_names[num_dev_cntrs + i] =
+ driver_cntr_names[i];
+
+ err = init_cntr_names(dd->portcntrnames,
+ dd->portcntrnameslen,
+ 0,
+ &num_port_cntrs,
+ &port_cntr_names);
+ if (err) {
+ kfree(dev_cntr_names);
+ dev_cntr_names = NULL;
+ return NULL;
+ }
+ cntr_names_initialized = 1;
+ }
+
+ if (!port_num)
+ return rdma_alloc_hw_stats_struct(
+ dev_cntr_names,
+ num_dev_cntrs + num_driver_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ else
+ return rdma_alloc_hw_stats_struct(
+ port_cntr_names,
+ num_port_cntrs,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static u64 hfi1_sps_ints(void)
+{
+ unsigned long flags;
+ struct hfi1_devdata *dd;
+ u64 sps_ints = 0;
+
+ spin_lock_irqsave(&hfi1_devs_lock, flags);
+ list_for_each_entry(dd, &hfi1_dev_list, list) {
+ sps_ints += get_all_cpu_total(dd->int_counter);
+ }
+ spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ return sps_ints;
+}
+
+static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ u64 *values;
+ int count;
+
+ if (!port) {
+ u64 *stats = (u64 *)&hfi1_stats;
+ int i;
+
+ hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
+ values[num_dev_cntrs] = hfi1_sps_ints();
+ for (i = 1; i < num_driver_cntrs; i++)
+ values[num_dev_cntrs + i] = stats[i];
+ count = num_dev_cntrs + num_driver_cntrs;
+ } else {
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+ hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
+ count = num_port_cntrs;
+ }
+
+ memcpy(stats->value, values, count * sizeof(u64));
+ return count;
+}
+
/**
* hfi1_register_ib_device - register our device with the infiniband core
* @dd: the device data structure
@@ -1620,6 +1749,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibdev *dev = &dd->verbs_dev;
struct ib_device *ibdev = &dev->rdi.ibdev;
struct hfi1_pportdata *ppd = dd->pport;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
size_t lcpysz = IB_DEVICE_NAME_MAX;
@@ -1632,6 +1762,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
seqlock_init(&dev->iowait_lock);
+ seqlock_init(&dev->txwait_lock);
INIT_LIST_HEAD(&dev->txwait);
INIT_LIST_HEAD(&dev->memwait);
@@ -1639,20 +1770,24 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
if (ret)
goto err_verbs_txreq;
+ /* Use first-port GUID as node guid */
+ ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
+
/*
* The system image GUID is supposed to be the same for all
* HFIs in a single system but since there can be other
* device types in the system, we can't be sure this is unique.
*/
if (!ib_hfi1_sys_image_guid)
- ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
+ ib_hfi1_sys_image_guid = ibdev->node_guid;
lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
- ibdev->node_guid = cpu_to_be64(ppd->guid);
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dma_device = &dd->pcidev->dev;
ibdev->modify_device = modify_device;
+ ibdev->alloc_hw_stats = alloc_hw_stats;
+ ibdev->get_hw_stats = get_hw_stats;
/* keep process mad in the driver */
ibdev->process_mad = hfi1_process_mad;
@@ -1767,6 +1902,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
del_timer_sync(&dev->mem_timer);
verbs_txreq_exit(dev);
+
+ kfree(dev_cntr_names);
+ kfree(port_cntr_names);
+ cntr_names_initialized = 0;
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1c3815d89eb7..e6b893010e6d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -73,7 +73,6 @@ struct hfi1_packet;
#include "iowait.h"
#define HFI1_MAX_RDMA_ATOMIC 16
-#define HFI1_GUIDS_PER_PORT 5
/*
* Increment this value if any changes that break userspace ABI
@@ -169,8 +168,6 @@ struct hfi1_ibport {
struct rvt_qp __rcu *qp[2];
struct rvt_ibport rvp;
- __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */
-
/* the first 16 entries are sl_to_vl for !OPA */
u8 sl_to_sc[32];
u8 sc_to_sl[32];
@@ -180,18 +177,19 @@ struct hfi1_ibdev {
struct rvt_dev_info rdi; /* Must be first */
/* QP numbers are shared by all IB ports */
- /* protect wait lists */
- seqlock_t iowait_lock;
+ /* protect txwait list */
+ seqlock_t txwait_lock ____cacheline_aligned_in_smp;
struct list_head txwait; /* list for wait verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
struct kmem_cache *verbs_txreq_cache;
- struct timer_list mem_timer;
+ u64 n_txwait;
+ u64 n_kmem_wait;
+ /* protect iowait lists */
+ seqlock_t iowait_lock ____cacheline_aligned_in_smp;
u64 n_piowait;
u64 n_piodrain;
- u64 n_txwait;
- u64 n_kmem_wait;
+ struct timer_list mem_timer;
#ifdef CONFIG_DEBUG_FS
/* per HFI debugfs */
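Splitting iowait_lock into txwait_lock and iowait_lock, each ____cacheline_aligned_in_smp together with the counters it protects, keeps the two wait paths from bouncing the same cache line. A minimal userspace sketch of that layout idea follows (a 64-byte line size is an assumption here; the kernel macro uses the real L1 line size).

/* Userspace illustration of per-lock cacheline alignment to avoid false
 * sharing between two independent wait paths.  Field names loosely echo
 * the hfi1_ibdev members above; the struct itself is hypothetical.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed line size for this sketch */

struct waitlists {
	/* txwait side */
	alignas(CACHELINE) unsigned int txwait_seq;
	unsigned long n_txwait;
	unsigned long n_kmem_wait;
	/* iowait side starts on a fresh cache line */
	alignas(CACHELINE) unsigned int iowait_seq;
	unsigned long n_piowait;
	unsigned long n_piodrain;
};

int main(void)
{
	printf("txwait_seq at offset %zu, iowait_seq at offset %zu\n",
	       offsetof(struct waitlists, txwait_seq),
	       offsetof(struct waitlists, iowait_seq));
	return 0;
}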
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 094ab829ec42..5d23172c470f 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -72,22 +72,22 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
kmem_cache_free(dev->verbs_txreq_cache, tx);
do {
- seq = read_seqbegin(&dev->iowait_lock);
+ seq = read_seqbegin(&dev->txwait_lock);
if (!list_empty(&dev->txwait)) {
struct iowait *wait;
- write_seqlock_irqsave(&dev->iowait_lock, flags);
+ write_seqlock_irqsave(&dev->txwait_lock, flags);
wait = list_first_entry(&dev->txwait, struct iowait,
list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ write_sequnlock_irqrestore(&dev->txwait_lock, flags);
hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
break;
}
- } while (read_seqretry(&dev->iowait_lock, seq));
+ } while (read_seqretry(&dev->txwait_lock, seq));
}
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
@@ -96,7 +96,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
{
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&dev->txwait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
@@ -108,13 +108,14 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
dev->n_txwait++;
qp->s_flags |= RVT_S_WAIT_TX;
list_add_tail(&priv->s_iowait.list, &dev->txwait);
+ priv->s_iowait.lock = &dev->txwait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
rvt_get_qp(qp);
}
qp->s_flags &= ~RVT_S_BUSY;
}
out:
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&dev->txwait_lock);
return tx;
}
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 5660897593ba..76216f2ef35a 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -65,6 +65,7 @@ struct verbs_txreq {
struct sdma_engine *sde;
struct send_context *psc;
u16 hdr_dwords;
+ u16 s_cur_size;
};
struct hfi1_ibdev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 24f79ee39fdf..0ac294db3b29 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,7 +39,8 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
struct device *dev = &hr_dev->pdev->dev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 863a17a2de40..605962f2828c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -61,9 +61,10 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
return ret;
}
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+ int rr)
{
- hns_roce_bitmap_free_range(bitmap, obj, 1);
+ hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
@@ -106,7 +107,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt)
+ unsigned long obj, int cnt,
+ int rr)
{
int i;
@@ -116,7 +118,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
for (i = 0; i < cnt; i++)
clear_bit(obj + i, bitmap->table);
- bitmap->last = min(bitmap->last, obj);
+ if (!rr)
+ bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
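The new rr argument decides whether a free rewinds the allocator's last hint: with BITMAP_RR the hint stays put, so later allocations keep moving forward (round robin) rather than immediately reusing the freed index. Below is a simplified userspace model of just that hint behaviour (a hypothetical toy allocator; it omits the real bitmap's top/mask rotation and locking).

/* Toy model of the "last" hint handling added above: freeing with
 * rr == 1 keeps allocation moving round robin, freeing with rr == 0
 * lets the freed index be reused at once.
 */
#include <stdio.h>
#include <string.h>

#define NBITS 8

struct toy_bitmap {
	unsigned char used[NBITS];
	unsigned int last;	/* next-allocation hint */
};

static int toy_alloc(struct toy_bitmap *bm)
{
	unsigned int i, idx;

	for (i = 0; i < NBITS; i++) {
		idx = (bm->last + i) % NBITS;
		if (!bm->used[idx]) {
			bm->used[idx] = 1;
			bm->last = (idx + 1) % NBITS;
			return idx;
		}
	}
	return -1;
}

static void toy_free(struct toy_bitmap *bm, unsigned int obj, int rr)
{
	bm->used[obj] = 0;
	if (!rr && obj < bm->last)
		bm->last = obj;	/* allow immediate reuse */
}

int main(void)
{
	struct toy_bitmap bm;
	int a, b;

	memset(&bm, 0, sizeof(bm));
	a = toy_alloc(&bm);		/* index 0 */
	toy_free(&bm, a, 1);		/* round-robin free: hint not rewound */
	b = toy_alloc(&bm);
	printf("rr free:    reallocated index %d (expect 1)\n", b);

	memset(&bm, 0, sizeof(bm));
	a = toy_alloc(&bm);		/* index 0 */
	toy_free(&bm, a, 0);		/* normal free: hint rewound */
	b = toy_alloc(&bm);
	printf("plain free: reallocated index %d (expect 0)\n", b);
	return 0;
}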
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 2a0b6c05da5f..8c1f7a6f84d2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -216,10 +216,10 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
goto out;
/*
- * It is timeout when wait_for_completion_timeout return 0
- * The return value is the time limit set in advance
- * how many seconds showing
- */
+ * If wait_for_completion_timeout() returns 0 the wait timed out;
+ * otherwise the return value is the time remaining (in jiffies)
+ * of the limit set in advance.
+ */
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index e3997d312c55..f5a9ee2fc53d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_CMD_H
#define HNS_ROCE_MAILBOX_SIZE 4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
/* TPT commands */
@@ -57,17 +58,6 @@ enum {
HNS_ROCE_CMD_QUERY_QP = 0x22,
};
-enum {
- HNS_ROCE_CMD_TIME_CLASS_A = 10000,
- HNS_ROCE_CMD_TIME_CLASS_B = 10000,
- HNS_ROCE_CMD_TIME_CLASS_C = 10000,
-};
-
-struct hns_roce_cmd_mailbox {
- void *buf;
- dma_addr_t dma;
-};
-
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout);
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 297016103aa7..4af403e1348c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -57,6 +57,32 @@
#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
+/*
+ * roce_hw_index_cmp_lt - Compare two hardware index values on a HiSilicon
+ * SoC and check whether a is less than b.
+ * @a: hardware index value
+ * @b: hardware index value
+ * @bits: the number of bits in a and b, range: 0~31.
+ *
+ * A hardware index increases continuously until it reaches its maximum
+ * value and then wraps back to zero, over and over. Because register
+ * fields are usually narrow, a field can only hold the low bits of the
+ * hardware index on a HiSilicon SoC.
+ * In some cases this driver needs to compare two values (a, b) read from
+ * two such register fields, for example:
+ * If a equals 0xfffe, b equals 0x1 and bits equals 16, b is assumed to
+ * have wrapped from 0xffff to 0x1, so a is less than b.
+ * If a equals 0xfffe, b equals 0xf001 and bits equals 16, a is greater
+ * than b.
+ *
+ * Return true if a is less than b, otherwise false.
+ */
+#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1)
+#define roce_hw_index_shift(bits) (32 - (bits))
+#define roce_hw_index_cmp_lt(a, b, bits) \
+ ((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
+ roce_hw_index_shift(bits)) < 0)
+
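roce_hw_index_cmp_lt() subtracts the two indices, masks the result to the field width, and shifts the field's sign bit up to bit 31 so an ordinary signed test detects wrap-around. A userspace copy of the macro (illustrative only, covering just the two cases from the comment above) shows both outcomes.

/* Userspace illustration of the wrap-aware "a < b" test used by
 * roce_hw_index_cmp_lt(); values and bit widths match the comment above.
 */
#include <stdio.h>

#define hw_index_mask(bits)	((1ul << (bits)) - 1)
#define hw_index_shift(bits)	(32 - (bits))
#define hw_index_cmp_lt(a, b, bits) \
	((int)((((a) - (b)) & hw_index_mask(bits)) << hw_index_shift(bits)) < 0)

int main(void)
{
	/* b wrapped from 0xffff to 0x1, so a (0xfffe) counts as smaller. */
	printf("0xfffe < 0x0001 (16 bits): %d\n",
	       hw_index_cmp_lt(0xfffeul, 0x0001ul, 16));
	/* No wrap here: a (0xfffe) counts as greater than b (0xf001). */
	printf("0xfffe < 0xf001 (16 bits): %d\n",
	       hw_index_cmp_lt(0xfffeul, 0xf001ul, 16));
	return 0;
}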
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
@@ -245,16 +271,26 @@
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
+#define ROCEE_SDB_PTR_CMP_BITS 28
+
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
+ (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
+
+#define ROCEE_SDB_CNT_CMP_BITS 16
+
+#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
+
+#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
+
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
-#define ROCEE_HW_VERSION_REG 0x8
-
#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
@@ -318,7 +354,11 @@
#define ROCEE_SDB_ISSUE_PTR_REG 0x758
#define ROCEE_SDB_SEND_PTR_REG 0x75C
+#define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850
+#define ROCEE_SCAEP_WR_CQE_CNT 0x8D0
#define ROCEE_SDB_INV_CNT_REG 0x9A4
+#define ROCEE_SDB_RETRY_CNT_REG 0x9AC
+#define ROCEE_TSP_BP_ST_REG 0x9EC
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 097365932b09..589496c8fb9e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -35,7 +35,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
@@ -77,7 +77,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
unsigned long cq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
@@ -166,7 +166,7 @@ err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
return ret;
}
@@ -176,11 +176,10 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev;
@@ -204,7 +203,7 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
spin_unlock_irq(&cq_table->lock);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
@@ -349,6 +348,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_mtt;
}
+ /*
+ * For a CQ created by kernel space the tptr value should be initialized
+ * to zero; for a CQ created by user space, setting tptr to zero here
+ * would cause synchronization problems, so it is initialized in user
+ * space instead.
+ */
+ if (!context)
+ *hr_cq->tptr_addr = 0;
+
/* Get created cq handler and carry out event */
hr_cq->comp = hns_roce_ib_cq_comp;
hr_cq->event = hns_roce_ib_cq_event;
@@ -383,19 +391,25 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ int ret = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ if (hr_dev->hw->destroy_cq) {
+ ret = hr_dev->hw->destroy_cq(ib_cq);
+ } else {
+ hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- if (ib_cq->uobject)
- ib_umem_release(hr_cq->umem);
- else
- /* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ if (ib_cq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else
+ /* Free the buff of stored cq */
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+ ib_cq->cqe);
- kfree(hr_cq);
+ kfree(hr_cq);
+ }
- return 0;
+ return ret;
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 341731553a60..1a6cb5d7a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -37,6 +37,8 @@
#define DRV_NAME "hns_roce"
+#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
+
#define MAC_ADDR_OCTET_NUM 6
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
@@ -54,6 +56,12 @@
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
+#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
+#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
+ (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
+#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
+#define HNS_ROCE_MIN_CQE_CNT 16
+
#define HNS_ROCE_MAX_IRQ_NUM 34
#define HNS_ROCE_COMP_VEC_NUM 32
@@ -70,6 +78,9 @@
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
+#define BITMAP_NO_RR 0
+#define BITMAP_RR 1
+
#define MR_TYPE_MR 0x00
#define MR_TYPE_DMA 0x03
@@ -196,9 +207,9 @@ struct hns_roce_bitmap {
/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
/* Every bit repesent to a partner free/used status in bitmap */
/*
-* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
-* Bit = 1 represent to idle and available; bit = 0: not available
-*/
+ * Initially, all bitmap bits are 0 except one bit of max_order, which is 1
+ * Bit = 1: idle and available; bit = 0: not available
+ */
struct hns_roce_buddy {
/* Members point to every order level bitmap */
unsigned long **bits;
@@ -296,7 +307,7 @@ struct hns_roce_cq {
u32 cq_depth;
u32 cons_index;
void __iomem *cq_db_l;
- void __iomem *tptr_addr;
+ u16 *tptr_addr;
unsigned long cqn;
u32 vector;
atomic_t refcount;
@@ -360,29 +371,34 @@ struct hns_roce_cmdq {
struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
- * Event mode: cmd register mutex protection,
- * ensure to not exceed max_cmds and user use limit region
- */
+ * Event mode: serializes use of the cmd registers and ensures
+ * max_cmds and the user usage limit region are not exceeded
+ */
struct semaphore event_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
struct hns_roce_cmd_context *context;
/*
- * Result of get integer part
- * which max_comds compute according a power of 2
- */
+ * Result of the integer part when max_cmds is computed
+ * as a power of 2
+ */
u16 token_mask;
/*
- * Process whether use event mode, init default non-zero
- * After the event queue of cmd event ready,
- * can switch into event mode
- * close device, switch into poll mode(non event mode)
- */
+ * Whether the command path uses event mode (set to its default at
+ * init). After the cmd event queue is ready, the driver can switch
+ * into event mode; when the device is closed it switches back to
+ * poll mode (non-event mode).
+ */
u8 use_events;
u8 toggle;
};
+struct hns_roce_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
struct hns_roce_dev;
struct hns_roce_qp {
@@ -424,8 +440,6 @@ struct hns_roce_ib_iboe {
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
- /* 16 GID is shared by 6 port in v1 engine. */
- union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
@@ -519,6 +533,8 @@ struct hns_roce_hw {
struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
+ int (*destroy_cq)(struct ib_cq *ibcq);
void *priv;
};
@@ -553,6 +569,8 @@ struct hns_roce_dev {
int cmd_mod;
int loop_idc;
+ dma_addr_t tptr_dma_addr; /*only for hw v1*/
+ u32 tptr_size; /*only for hw v1*/
struct hns_roce_hw *hw;
};
@@ -657,7 +675,8 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+ int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 resetrved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
@@ -665,9 +684,11 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
- unsigned long obj, int cnt);
+ unsigned long obj, int cnt,
+ int rr);
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);
@@ -681,6 +702,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
+unsigned long key_to_hw_index(u32 key);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
@@ -717,6 +742,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 21e21b03cfb5..50f864935a0e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -371,9 +371,9 @@ static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
int i = 0;
/**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
+ * For AEQ overflow, ECC multi-bit error and CEQ overflow alarms the
+ * interrupt must be cleared: mask the irq, clear the irq, then cancel
+ * the mask operation
+ */
aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
if (roce_get_bit(aeshift_val,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 250d8f280390..c5104e0b2916 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -80,9 +80,9 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
--order;
/*
- * Alloc memory one time. If failed, don't alloc small block
- * memory, directly return fail.
- */
+ * Allocate the memory in one shot. If that fails, do not fall back
+ * to smaller allocations; return failure directly.
+ */
mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 71232e5fabf6..b8111b0c8877 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/etherdevice.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
@@ -72,6 +73,8 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq = 0;
u32 ind = 0;
int ret = 0;
+ u8 *smac;
+ int loopback;
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_RC)) {
@@ -129,6 +132,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
UD_SEND_WQE_U32_8_DMAC_5_M,
UD_SEND_WQE_U32_8_DMAC_5_S,
ah->av.mac[5]);
+
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+ roce_set_bit(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
+ loopback);
+
roce_set_field(ud_sq_wqe->u32_8,
UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
@@ -284,6 +295,8 @@ out:
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
+ SQ_DOORBELL_U32_4_SL_S, qp->sl);
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
@@ -611,6 +624,213 @@ ext_sdb_buf_fail_out:
return ret;
}
+static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
+ struct ib_pd *pd)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_qp_init_attr init_attr;
+ struct ib_qp *qp;
+
+ memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
+ init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
+
+ qp = hns_roce_create_qp(pd, &init_attr, NULL);
+ if (IS_ERR(qp)) {
+ dev_err(dev, "Create loop qp for mr free failed!");
+ return NULL;
+ }
+
+ return to_hr_qp(qp);
+}
+
+static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_cq_init_attr cq_init_attr;
+ struct hns_roce_free_mr *free_mr;
+ struct ib_qp_attr attr = { 0 };
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_qp *hr_qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ u64 subnet_prefix;
+ int attr_mask = 0;
+ int i;
+ int ret;
+ u8 phy_port;
+ u8 sl;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ /* Reserved cq for loop qp */
+ cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
+ cq_init_attr.comp_vector = 0;
+ cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+ if (IS_ERR(cq)) {
+ dev_err(dev, "Create cq for reseved loop qp failed!");
+ return -ENOMEM;
+ }
+ free_mr->mr_free_cq = to_hr_cq(cq);
+ free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
+ free_mr->mr_free_cq->ib_cq.uobject = NULL;
+ free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
+ free_mr->mr_free_cq->ib_cq.event_handler = NULL;
+ free_mr->mr_free_cq->ib_cq.cq_context = NULL;
+ atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
+
+ pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
+ if (IS_ERR(pd)) {
+ dev_err(dev, "Create pd for reseved loop qp failed!");
+ ret = -ENOMEM;
+ goto alloc_pd_failed;
+ }
+ free_mr->mr_free_pd = to_hr_pd(pd);
+ free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
+ free_mr->mr_free_pd->ibpd.uobject = NULL;
+ atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
+
+ attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+ attr.pkey_index = 0;
+ attr.min_rnr_timer = 0;
+ /* Disable read ability */
+ attr.max_dest_rd_atomic = 0;
+ attr.max_rd_atomic = 0;
+ /* Use arbitrary values as rq_psn and sq_psn */
+ attr.rq_psn = 0x0808;
+ attr.sq_psn = 0x0808;
+ attr.retry_cnt = 7;
+ attr.rnr_retry = 7;
+ attr.timeout = 0x12;
+ attr.path_mtu = IB_MTU_256;
+ attr.ah_attr.ah_flags = 1;
+ attr.ah_attr.static_rate = 3;
+ attr.ah_attr.grh.sgid_index = 0;
+ attr.ah_attr.grh.hop_limit = 1;
+ attr.ah_attr.grh.flow_label = 0;
+ attr.ah_attr.grh.traffic_class = 0;
+
+ subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
+ if (IS_ERR(free_mr->mr_free_qp[i])) {
+ dev_err(dev, "Create loop qp failed!\n");
+ goto create_lp_qp_failed;
+ }
+ hr_qp = free_mr->mr_free_qp[i];
+
+ sl = i / caps->num_ports;
+
+ if (caps->num_ports == HNS_ROCE_MAX_PORTS)
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+ (i % caps->num_ports);
+ else
+ phy_port = i % caps->num_ports;
+
+ hr_qp->port = phy_port + 1;
+ hr_qp->phy_port = phy_port;
+ hr_qp->ibqp.qp_type = IB_QPT_RC;
+ hr_qp->ibqp.device = &hr_dev->ib_dev;
+ hr_qp->ibqp.uobject = NULL;
+ atomic_set(&hr_qp->ibqp.usecnt, 0);
+ hr_qp->ibqp.pd = pd;
+ hr_qp->ibqp.recv_cq = cq;
+ hr_qp->ibqp.send_cq = cq;
+
+ attr.ah_attr.port_num = phy_port + 1;
+ attr.ah_attr.sl = sl;
+ attr.port_num = phy_port + 1;
+
+ attr.dest_qp_num = hr_qp->qpn;
+ memcpy(attr.ah_attr.dmac, hr_dev->dev_addr[phy_port],
+ MAC_ADDR_OCTET_NUM);
+
+ memcpy(attr.ah_attr.grh.dgid.raw,
+ &subnet_prefix, sizeof(u64));
+ memcpy(&attr.ah_attr.grh.dgid.raw[8],
+ hr_dev->dev_addr[phy_port], 3);
+ memcpy(&attr.ah_attr.grh.dgid.raw[13],
+ hr_dev->dev_addr[phy_port] + 3, 3);
+ attr.ah_attr.grh.dgid.raw[11] = 0xff;
+ attr.ah_attr.grh.dgid.raw[12] = 0xfe;
+ attr.ah_attr.grh.dgid.raw[8] ^= 2;
+
+ attr_mask |= IB_QP_PORT;
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_RESET, IB_QPS_INIT);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_INIT, IB_QPS_RTR);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ IB_QPS_RTR, IB_QPS_RTS);
+ if (ret) {
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
+ goto create_lp_qp_failed;
+ }
+ }
+
+ return 0;
+
+create_lp_qp_failed:
+ for (i -= 1; i >= 0; i--) {
+ hr_qp = free_mr->mr_free_qp[i];
+ if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+ dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
+ }
+
+ if (hns_roce_dealloc_pd(pd))
+ dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
+
+alloc_pd_failed:
+ if (hns_roce_ib_destroy_cq(cq))
+ dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
+
+ return -EINVAL;
+}
+
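The loopback QPs set up above build their destination GID from the fe80::/64 link-local prefix plus the port MAC in modified EUI-64 form: mac[0..2], 0xff, 0xfe, mac[3..5], with the universal/local bit of byte 8 flipped. A standalone sketch of that construction follows (the helper name and MAC value are made up for illustration; this is not driver code).

/* Build a link-local GID from a MAC address the same way the
 * loopback-QP setup above does: fe80::/64 prefix + modified EUI-64
 * interface ID.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void mac_to_ll_gid(const uint8_t mac[6], uint8_t gid[16])
{
	uint64_t subnet_prefix = 0xfe80000000000000ULL;
	int i;

	memset(gid, 0, 16);
	for (i = 0; i < 8; i++)
		gid[i] = subnet_prefix >> (56 - 8 * i);	/* big-endian prefix */

	memcpy(&gid[8], mac, 3);
	gid[11] = 0xff;
	gid[12] = 0xfe;
	memcpy(&gid[13], mac + 3, 3);
	gid[8] ^= 2;		/* flip the universal/local bit */
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint8_t gid[16];
	int i;

	mac_to_ll_gid(mac, gid);
	for (i = 0; i < 16; i++)
		printf("%02x%s", gid[i], i == 15 ? "\n" : (i & 1 ? ":" : ""));
	return 0;
}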
+static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_qp *hr_qp;
+ int ret;
+ int i;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ hr_qp = free_mr->mr_free_qp[i];
+ ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+ if (ret)
+ dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
+ i, ret);
+ }
+
+ ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+ if (ret)
+ dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
+
+ ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+ if (ret)
+ dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
+}
+
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
@@ -648,6 +868,223 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
return 0;
}
+void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+{
+ struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+ struct hns_roce_dev *hr_dev;
+
+ lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
+ work);
+ hr_dev = to_hr_dev(lp_qp_work->ib_dev);
+
+ hns_roce_v1_release_lp_qp(hr_dev);
+
+ if (hns_roce_v1_rsv_lp_qp(hr_dev))
+ dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");
+
+ if (lp_qp_work->comp_flag)
+ complete(lp_qp_work->comp);
+
+ kfree(lp_qp_work);
+}
+
+static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
+ GFP_KERNEL);
+ if (!lp_qp_work)
+ return -ENOMEM;
+
+ INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
+
+ lp_qp_work->ib_dev = &(hr_dev->ib_dev);
+ lp_qp_work->comp = &comp;
+ lp_qp_work->comp_flag = 1;
+
+ init_completion(lp_qp_work->comp);
+
+ queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+ while (time_before_eq(jiffies, end)) {
+ if (try_wait_for_completion(&comp))
+ return 0;
+ msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
+ }
+
+ lp_qp_work->comp_flag = 0;
+ if (try_wait_for_completion(&comp))
+ return 0;
+
+ dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
+ return -ETIMEDOUT;
+}
+
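hns_roce_v1_recreate_lp_qp() queues the work, polls try_wait_for_completion() until a deadline, and on timeout clears comp_flag so the worker will not complete() a stack completion that is about to go out of scope; hns_roce_v1_dereg_mr() below uses the same shape. The pthread sketch here models the same bounded-wait-then-detach idea, but with a refcounted heap handshake instead of the driver's comp_flag (an illustrative substitute, not the driver's mechanism).

/* Userspace model of "wait for a worker up to a timeout, then detach":
 * the shared handshake is refcounted so whichever side finishes last
 * frees it.  Build with -lpthread.  Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct handshake {
	pthread_mutex_t lock;
	int done;
	int refs;
};

static void put_handshake(struct handshake *h)
{
	int last;

	pthread_mutex_lock(&h->lock);
	last = (--h->refs == 0);
	pthread_mutex_unlock(&h->lock);
	if (last) {
		pthread_mutex_destroy(&h->lock);
		free(h);
	}
}

static void *worker(void *arg)
{
	struct handshake *h = arg;

	sleep(1);			/* pretend to recreate the QPs */
	pthread_mutex_lock(&h->lock);
	h->done = 1;			/* "complete()" */
	pthread_mutex_unlock(&h->lock);
	put_handshake(h);
	return NULL;
}

int main(void)
{
	struct handshake *h = malloc(sizeof(*h));
	pthread_t thr;
	int i, done = 0;

	if (!h)
		return 1;
	pthread_mutex_init(&h->lock, NULL);
	h->done = 0;
	h->refs = 2;			/* one ref for us, one for the worker */
	pthread_create(&thr, NULL, worker, h);
	pthread_detach(thr);

	for (i = 0; i < 150 && !done; i++) {	/* ~3s budget, 20ms steps */
		pthread_mutex_lock(&h->lock);
		done = h->done;
		pthread_mutex_unlock(&h->lock);
		if (!done)
			usleep(20 * 1000);
	}

	printf(done ? "worker finished in time\n" : "timed out, detaching\n");
	put_handshake(h);		/* drop our ref; worker may still run */
	return done ? 0 : 1;
}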
+static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_send_wr send_wr, *bad_wr;
+ int ret;
+
+ memset(&send_wr, 0, sizeof(send_wr));
+ send_wr.next = NULL;
+ send_wr.num_sge = 0;
+ send_wr.send_flags = 0;
+ send_wr.sg_list = NULL;
+ send_wr.wr_id = (unsigned long long)&send_wr;
+ send_wr.opcode = IB_WR_RDMA_WRITE;
+
+ ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
+ if (ret) {
+ dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
+{
+ struct hns_roce_mr_free_work *mr_work;
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_cq *mr_free_cq;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_mr *hr_mr;
+ struct hns_roce_qp *hr_qp;
+ struct device *dev;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+ int i;
+ int ret;
+ int ne;
+
+ mr_work = container_of(work, struct hns_roce_mr_free_work, work);
+ hr_mr = (struct hns_roce_mr *)mr_work->mr;
+ hr_dev = to_hr_dev(mr_work->ib_dev);
+ dev = &hr_dev->pdev->dev;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+ mr_free_cq = free_mr->mr_free_cq;
+
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ hr_qp = free_mr->mr_free_qp[i];
+ ret = hns_roce_v1_send_lp_wqe(hr_qp);
+ if (ret) {
+ dev_err(dev,
+ "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
+ hr_qp->qpn, ret);
+ goto free_work;
+ }
+ }
+
+ ne = HNS_ROCE_V1_RESV_QP;
+ do {
+ ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
+ if (ret < 0) {
+ dev_err(dev,
+ "qp 0x%lx: poll cqe failed(%d) for mr 0x%x free! %d cqe remain\n",
+ hr_qp->qpn, ret, hr_mr->key, ne);
+ goto free_work;
+ }
+ ne -= ret;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ } while (ne && time_before_eq(jiffies, end));
+
+ if (ne != 0)
+ dev_err(dev,
+ "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
+ hr_mr->key, ne);
+
+free_work:
+ if (mr_work->comp_flag)
+ complete(mr_work->comp);
+ kfree(mr_work);
+}
+
+int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_mr_free_work *mr_work;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+ unsigned long end =
+ msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+ unsigned long start = jiffies;
+ int npages;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ if (mr->enabled) {
+ if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+ & (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "HW2SW_MPT failed!\n");
+ }
+
+ mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
+ if (!mr_work) {
+ ret = -ENOMEM;
+ goto free_mr;
+ }
+
+ INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
+
+ mr_work->ib_dev = &(hr_dev->ib_dev);
+ mr_work->comp = &comp;
+ mr_work->comp_flag = 1;
+ mr_work->mr = (void *)mr;
+ init_completion(mr_work->comp);
+
+ queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+ while (time_before_eq(jiffies, end)) {
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ }
+
+ mr_work->comp_flag = 0;
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+
+ dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
+ ret = -ETIMEDOUT;
+
+free_mr:
+ dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
+ mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
+
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+ dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mr->key), 0);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return ret;
+}
+
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
@@ -849,6 +1286,85 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}
+static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
+
+ /*
+ * This buffer is used for the CQ's tptr (tail pointer), also known
+ * as ci (consumer index). On hip06 every CQ uses 2 bytes to store
+ * its cqe ci. Hardware reads this area to get the new ci when the
+ * queue is almost full.
+ */
+ tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+ &tptr_buf->map, GFP_KERNEL);
+ if (!tptr_buf->buf)
+ return -ENOMEM;
+
+ hr_dev->tptr_dma_addr = tptr_buf->map;
+ hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
+
+ return 0;
+}
+
+static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
+
+ dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+ tptr_buf->buf, tptr_buf->map);
+}
+
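Each CQ's tail pointer lives in a fixed 2-byte slot of the single tptr buffer allocated above, at offset cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE; hns_roce_v1_write_cqc() later hands the matching DMA address to hardware. The tiny sketch below mirrors that addressing (the 2-byte entry size comes from the comment above; the buffer size, CQ count and DMA base are made-up illustration values).

/* Sketch of per-CQ tail-pointer addressing: one shared buffer, each CQ
 * owns a 2-byte slot at cqn * entry_size.  Not driver code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TPTR_ENTRY_SIZE	2	/* 2 bytes of consumer index per CQ (hip06) */
#define NUM_CQS		8	/* illustration only */

int main(void)
{
	uint8_t *tptr_buf = calloc(NUM_CQS, TPTR_ENTRY_SIZE);
	uint64_t tptr_dma_base = 0x80000000ULL;	/* pretend DMA address */
	unsigned long cqn = 5;
	uint16_t *tptr_addr;
	uint64_t tptr_dma_addr;
	size_t offset;

	if (!tptr_buf)
		return 1;

	/* Same arithmetic as hns_roce_v1_write_cqc(). */
	offset = cqn * TPTR_ENTRY_SIZE;
	tptr_addr = (uint16_t *)(tptr_buf + offset);
	tptr_dma_addr = tptr_dma_base + offset;

	*tptr_addr = 0;		/* kernel-created CQs start their ci at 0 */
	printf("cq %lu: cpu slot offset %zu, dma addr 0x%llx\n",
	       cqn, offset, (unsigned long long)tptr_dma_addr);

	free(tptr_buf);
	return 0;
}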
+static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
+ if (!free_mr->free_mr_wq) {
+ dev_err(dev, "Create free mr workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ ret = hns_roce_v1_rsv_lp_qp(hr_dev);
+ if (ret) {
+ dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
+ flush_workqueue(free_mr->free_mr_wq);
+ destroy_workqueue(free_mr->free_mr_wq);
+ }
+
+ return ret;
+}
+
+static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ free_mr = &priv->free_mr;
+
+ flush_workqueue(free_mr->free_mr_wq);
+ destroy_workqueue(free_mr->free_mr_wq);
+
+ hns_roce_v1_release_lp_qp(hr_dev);
+}
+
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
@@ -898,6 +1414,38 @@ int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
return ret;
}
+static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_des_qp *des_qp;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ des_qp = &priv->des_qp;
+
+ des_qp->requeue_flag = 1;
+ des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
+ if (!des_qp->qp_wq) {
+ dev_err(dev, "Create destroy qp workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_des_qp *des_qp;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ des_qp = &priv->des_qp;
+
+ des_qp->requeue_flag = 0;
+ flush_workqueue(des_qp->qp_wq);
+ destroy_workqueue(des_qp->qp_wq);
+}
+
void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
@@ -906,12 +1454,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
ROCEE_VENDOR_PART_ID_REG));
- hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
-
hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
ROCEE_SYS_IMAGE_GUID_L_REG)) |
((u64)le32_to_cpu(roce_read(hr_dev,
ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+ hr_dev->hw_rev = HNS_ROCE_HW_VER1;
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
@@ -1001,18 +1548,44 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
goto error_failed_raq_init;
}
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
ret = hns_roce_bt_init(hr_dev);
if (ret) {
dev_err(dev, "bt init failed!\n");
goto error_failed_bt_init;
}
+ ret = hns_roce_tptr_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "tptr init failed!\n");
+ goto error_failed_tptr_init;
+ }
+
+ ret = hns_roce_des_qp_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "des qp init failed!\n");
+ goto error_failed_des_qp_init;
+ }
+
+ ret = hns_roce_free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "free mr init failed!\n");
+ goto error_failed_free_mr_init;
+ }
+
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+
return 0;
+error_failed_free_mr_init:
+ hns_roce_des_qp_free(hr_dev);
+
+error_failed_des_qp_init:
+ hns_roce_tptr_free(hr_dev);
+
+error_failed_tptr_init:
+ hns_roce_bt_free(hr_dev);
+
error_failed_bt_init:
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_raq_free(hr_dev);
error_failed_raq_init:
@@ -1022,8 +1595,11 @@ error_failed_raq_init:
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
- hns_roce_bt_free(hr_dev);
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_free_mr_free(hr_dev);
+ hns_roce_des_qp_free(hr_dev);
+ hns_roce_tptr_free(hr_dev);
+ hns_roce_bt_free(hr_dev);
hns_roce_raq_free(hr_dev);
hns_roce_db_free(hr_dev);
}
@@ -1061,6 +1637,14 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
u32 *p;
u32 val;
+ /*
+ * When the MAC address changes, loopback may fail because smac
+ * no longer equals dmac.
+ * The reserved QPs need to be released and created again.
+ */
+ if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
+ dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+
p = (u32 *)(&addr[0]);
reg_smac_l = *p;
roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
@@ -1293,9 +1877,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
}
/*
- * Now backwards through the CQ, removing CQ entries
- * that match our QP by overwriting them with next entries.
- */
+ * Now walk backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with the next entries.
+ */
while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
@@ -1317,9 +1901,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
/*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
@@ -1339,14 +1923,21 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
dma_addr_t dma_handle, int nent, u32 vector)
{
struct hns_roce_cq_context *cq_context = NULL;
- void __iomem *tptr_addr;
+ struct hns_roce_buf_list *tptr_buf;
+ struct hns_roce_v1_priv *priv;
+ dma_addr_t tptr_dma_addr;
+ int offset;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ tptr_buf = &priv->tptr_table.tptr_buf;
cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
- tptr_addr = 0;
- hr_dev->priv_addr = tptr_addr;
- hr_cq->tptr_addr = tptr_addr;
+ /* Get the tptr for this CQ. */
+ offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
+ tptr_dma_addr = tptr_buf->map + offset;
+ hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
/* Register cq_context members */
roce_set_field(cq_context->cqc_byte_4,
@@ -1390,10 +1981,10 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_20,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
- (u64)tptr_addr >> 44);
+ tptr_dma_addr >> 44);
cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
- cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
+ cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
@@ -1407,7 +1998,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_bit(cq_context->cqc_byte_32,
CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
0);
- /*The initial value of cq's ci is 0 */
+ /* The initial value of cq's ci is 0 */
roce_set_field(cq_context->cqc_byte_32,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
@@ -1424,9 +2015,9 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
/*
- * flags = 0; Notification Flag = 1, next
- * flags = 1; Notification Flag = 0, solocited
- */
+ * flags = 0: Notification Flag = 1, next
+ * flags = 1: Notification Flag = 0, solicited
+ */
doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
@@ -1581,10 +2172,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
wq = &(*cur_qp)->sq;
if ((*cur_qp)->sq_signal_bits) {
/*
- * If sg_signal_bit is 1,
- * firstly tail pointer updated to wqe
- * which current cqe correspond to
- */
+ * If sq_signal_bits is 1, first update the tail
+ * pointer to the wqe that the current cqe
+ * corresponds to
+ */
wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
CQE_BYTE_4_WQE_INDEX_S);
@@ -1659,8 +2250,14 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
break;
}
- if (npolled)
+ if (npolled) {
+ *hr_cq->tptr_addr = hr_cq->cons_index &
+ ((hr_cq->cq_depth << 1) - 1);
+
+ /* Memory barrier */
+ wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1799,12 +2396,12 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
HNS_ROCE_CMD_2RST_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
HNS_ROCE_CMD_2ERR_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
@@ -1814,7 +2411,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
op[cur_state][new_state],
- HNS_ROCE_CMD_TIME_CLASS_C);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
@@ -2000,11 +2597,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/*
- *Reset to init
- * Mandatory param:
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
- * Optional param: NA
- */
+ * Reset to init
+ * Mandatory param:
+ * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
+ * Optional param: NA
+ */
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
roce_set_field(context->qpc_bytes_4,
QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
@@ -2172,24 +2769,14 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
hr_qp->sq_signal_bits);
- for (port = 0; port < hr_dev->caps.num_ports; port++) {
- smac = (u8 *)hr_dev->dev_addr[port];
- dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
- smac[0], smac[1], smac[2], smac[3], smac[4],
- smac[5]);
- if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
- (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
- (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
- 1);
- break;
- }
- }
-
- if (hr_dev->loop_idc == 0x1)
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
+ hr_qp->port;
+ smac = (u8 *)hr_dev->dev_addr[port];
+ /* when dmac equals smac or loop_idc is 1, it should loopback */
+ if (ether_addr_equal_unaligned(dmac, smac) ||
+ hr_dev->loop_idc == 0x1)
roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
@@ -2509,7 +3096,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
/* Every status migrate must change state */
roce_set_field(context->qpc_bytes_144,
QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
/* SW pass context to HW */
ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
@@ -2522,9 +3109,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/*
- * Use rst2init to instead of init2init with drv,
- * need to hw to flash RQ HEAD by DB again
- */
+ * Use rst2init instead of init2init with the driver;
+ * the hardware needs to flush the RQ HEAD via doorbell again.
+ */
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
/* Memory barrier */
wmb();
@@ -2619,7 +3206,7 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QP,
- HNS_ROCE_CMD_TIME_CLASS_A);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (!ret)
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
else
@@ -2630,8 +3217,78 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
return ret;
}
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_sqp_context context;
+ u32 addr;
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
+ addr = ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->port * sizeof(struct hns_roce_sqp_context);
+ context.qp1c_bytes_4 = roce_read(hr_dev, addr);
+ context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
+ context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
+ context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
+ context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
+ context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
+ context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
+ context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
+ context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
+ context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
+
+ hr_qp->state = roce_get_field(context.qp1c_bytes_4,
+ QP1C_BYTES_4_QP_STATE_M,
+ QP1C_BYTES_4_QP_STATE_S);
+ qp_attr->qp_state = hr_qp->state;
+ qp_attr->path_mtu = IB_MTU_256;
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->qkey = QKEY_VAL;
+ qp_attr->rq_psn = 0;
+ qp_attr->sq_psn = 0;
+ qp_attr->dest_qp_num = 1;
+ qp_attr->qp_access_flags = 6;
+
+ qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
+ QP1C_BYTES_20_PKEY_IDX_M,
+ QP1C_BYTES_20_PKEY_IDX_S);
+ qp_attr->port_num = hr_qp->port + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = 0;
+ qp_attr->max_dest_rd_atomic = 0;
+ qp_attr->min_rnr_timer = 0;
+ qp_attr->timeout = 0;
+ qp_attr->retry_cnt = 0;
+ qp_attr->rnr_retry = 0;
+ qp_attr->alt_timeout = 0;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->create_flags = 0;
+
+ mutex_unlock(&hr_qp->mutex);
+
+ return 0;
+}
+
+static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -2725,9 +3382,7 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
- qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
+ qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
@@ -2767,134 +3422,397 @@ out:
return ret;
}
-static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- int is_user)
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ return hr_qp->doorbell_qpn <= 1 ?
+ hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
+ hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
+}
+
+static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ u32 sdb_issue_ptr,
+ u32 *sdb_inv_cnt,
+ u32 *wait_stage)
{
- u32 sdbinvcnt;
- unsigned long end = 0;
- u32 sdbinvcnt_val;
- u32 sdbsendptr_val;
- u32 sdbisusepr_val;
- struct hns_roce_cq *send_cq, *recv_cq;
struct device *dev = &hr_dev->pdev->dev;
+ u32 sdb_retry_cnt, old_retry;
+ u32 sdb_send_ptr, old_send;
+ u32 success_flags = 0;
+ u32 cur_cnt, old_cnt;
+ unsigned long end;
+ u32 send_ptr;
+ u32 inv_cnt;
+ u32 tsp_st;
+
+ if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
+ *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
+ dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
+ hr_qp->qpn, *wait_stage);
+ return -EINVAL;
+ }
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- if (hr_qp->state != IB_QPS_RESET) {
- /*
- * Set qp to ERR,
- * waiting for hw complete processing all dbs
- */
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
- to_hns_roce_state(
- (enum ib_qp_state)hr_qp->state),
- HNS_ROCE_QP_STATE_ERR, NULL,
- hr_qp))
- dev_err(dev, "modify QP %06lx to ERR failed.\n",
- hr_qp->qpn);
-
- /* Record issued doorbell */
- sdbisusepr_val = roce_read(hr_dev,
- ROCEE_SDB_ISSUE_PTR_REG);
- /*
- * Query db process status,
- * until hw process completely
- */
- end = msecs_to_jiffies(
- HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
- do {
- sdbsendptr_val = roce_read(hr_dev,
+ /* Calculate the total timeout for the entire verification process */
+ end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
+
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
+ /* Query db process status, until hw process completely */
+ sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
+ ROCEE_SDB_PTR_CMP_BITS)) {
+ if (!time_before(jiffies, end)) {
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
+ hr_qp->qpn, sdb_issue_ptr,
+ sdb_send_ptr);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+ sdb_send_ptr = roce_read(hr_dev,
ROCEE_SDB_SEND_PTR_REG);
- if (!time_before(jiffies, end)) {
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
- hr_qp->qpn);
- break;
- }
- } while ((short)(roce_get_field(sdbsendptr_val,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
- roce_get_field(sdbisusepr_val,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
- ) < 0);
+ }
- /* Get list pointer */
- sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ if (roce_get_field(sdb_issue_ptr,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
+ roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
+ old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
- /* Query db's list status, until hw reversal */
do {
- sdbinvcnt_val = roce_read(hr_dev,
- ROCEE_SDB_INV_CNT_REG);
+ tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
+ if (roce_get_bit(tsp_st,
+ ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+ return 0;
+ }
+
if (!time_before(jiffies, end)) {
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
- hr_qp->qpn);
- dev_err(dev, "SdbInvCnt = 0x%x\n",
- sdbinvcnt_val);
- break;
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
+ "issue 0x%x send 0x%x.\n",
+ hr_qp->qpn, sdb_issue_ptr,
+ sdb_send_ptr);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+
+ sdb_send_ptr = roce_read(hr_dev,
+ ROCEE_SDB_SEND_PTR_REG);
+ sdb_retry_cnt = roce_read(hr_dev,
+ ROCEE_SDB_RETRY_CNT_REG);
+ cur_cnt = roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (!roce_get_bit(tsp_st,
+ ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+ old_cnt = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(old_retry,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ success_flags = 1;
+ } else {
+ old_cnt = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ success_flags = 1;
+ else {
+ send_ptr = roce_get_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ roce_set_field(old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+ send_ptr);
+ }
}
- } while ((short)(roce_get_field(sdbinvcnt_val,
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
- (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
-
- /* Modify qp to reset before destroying qp */
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
- to_hns_roce_state(
- (enum ib_qp_state)hr_qp->state),
- HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
- dev_err(dev, "modify QP %06lx to RESET failed.\n",
- hr_qp->qpn);
+ } while (!success_flags);
}
+
+ *wait_stage = HNS_ROCE_V1_DB_STAGE2;
+
+ /* Get list pointer */
+ *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
+ hr_qp->qpn, *sdb_inv_cnt);
+ }
+
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
+ /* Query db's list status, until hw reversal */
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ while (roce_hw_index_cmp_lt(inv_cnt,
+ *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
+ ROCEE_SDB_CNT_CMP_BITS)) {
+ if (!time_before(jiffies, end)) {
+ dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
+ hr_qp->qpn, inv_cnt);
+ return 0;
+ }
+
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+ }
+
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+ }
+
+ return 0;
+}
+
+static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_qp_work *qp_work_entry,
+ int *is_timeout)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 sdb_issue_ptr;
+ int ret;
+
+ if (hr_qp->state != IB_QPS_RESET) {
+ /* Set qp to ERR, and wait for hw to finish processing all dbs */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_ERR);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+
+ /* Record issued doorbell */
+ sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
+ qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
+ qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
+
+ /* Query db process status until hw finishes processing */
+ ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
+ &qp_work_entry->sdb_inv_cnt,
+ &qp_work_entry->db_wait_stage);
+ if (ret) {
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
+ qp_work_entry->sche_cnt = 0;
+ *is_timeout = 1;
+ return 0;
+ }
+
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_RESET);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
+ hr_qp->qpn);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
+{
+ struct hns_roce_qp_work *qp_work_entry;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_qp *hr_qp;
+ struct device *dev;
+ int ret;
+
+ qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
+ hr_dev = to_hr_dev(qp_work_entry->ib_dev);
+ dev = &hr_dev->pdev->dev;
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ hr_qp = qp_work_entry->qp;
+
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+
+ qp_work_entry->sche_cnt++;
+
+ /* Query db process status until hw finishes processing */
+ ret = check_qp_db_process_status(hr_dev, hr_qp,
+ qp_work_entry->sdb_issue_ptr,
+ &qp_work_entry->sdb_inv_cnt,
+ &qp_work_entry->db_wait_stage);
+ if (ret) {
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+ hr_qp->qpn);
+ return;
+ }
+
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
+ priv->des_qp.requeue_flag) {
+ queue_work(priv->des_qp.qp_wq, work);
+ return;
+ }
+
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+ IB_QPS_RESET);
+ if (ret) {
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+ return;
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
+ /* RC QP, release QPN */
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ kfree(hr_qp);
+ } else
+ kfree(hr_to_hr_sqp(hr_qp));
+
+ kfree(qp_work_entry);
+
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+}
+
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_work qp_work_entry;
+ struct hns_roce_qp_work *qp_work;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_cq *send_cq, *recv_cq;
+ int is_user = !!ibqp->pd->uobject;
+ int is_timeout = 0;
+ int ret;
+
+ ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
+ if (ret) {
+ dev_err(dev, "QP reset state check failed(%d)!\n", ret);
+ return ret;
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
hns_roce_lock_cqs(send_cq, recv_cq);
-
if (!is_user) {
__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
-
- hns_roce_qp_remove(hr_dev, hr_qp);
-
hns_roce_unlock_cqs(send_cq, recv_cq);
- hns_roce_qp_free(hr_dev, hr_qp);
+ if (!is_timeout) {
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_qp_free(hr_dev, hr_qp);
- /* Not special_QP, free their QPN */
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UD))
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ /* RC QP, release QPN */
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ }
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
- if (is_user) {
+ if (is_user)
ib_umem_release(hr_qp->umem);
- } else {
+ else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
+
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+
+ if (!is_timeout) {
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+ kfree(hr_qp);
+ else
+ kfree(hr_to_hr_sqp(hr_qp));
+ } else {
+ qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
+ if (!qp_work)
+ return -ENOMEM;
+
+ INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
+ qp_work->ib_dev = &hr_dev->ib_dev;
+ qp_work->qp = hr_qp;
+ qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
+ qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
+ qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
+ qp_work->sche_cnt = qp_work_entry.sche_cnt;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ queue_work(priv->des_qp.qp_wq, &qp_work->work);
+ dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ }
+
+ return 0;
}
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqe_cnt_ori;
+ u32 cqe_cnt_cur;
+ u32 cq_buf_size;
+ int wait_time = 0;
+ int ret = 0;
- hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+ hns_roce_free_cq(hr_dev, hr_cq);
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ /*
+ * Before freeing the cq buffer, we need to ensure that all outstanding CQEs
+ * have been written by checking the CQE counter.
+ */
+ cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+ while (1) {
+ if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
+ HNS_ROCE_CQE_WCMD_EMPTY_BIT)
+ break;
- return 0;
+ cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+ if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
+ break;
+
+ msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
+ if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
+ dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
+ hr_cq->cqn);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ wait_time++;
+ }
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+ if (ibcq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else {
+ /* Free the buff of stored cq */
+ cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
+ hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ }
+
+ kfree(hr_cq);
+
+ return ret;
}
struct hns_roce_v1_priv hr_v1_priv;
@@ -2917,5 +3835,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.post_recv = hns_roce_v1_post_recv,
.req_notify_cq = hns_roce_v1_req_notify_cq,
.poll_cq = hns_roce_v1_poll_cq,
+ .dereg_mr = hns_roce_v1_dereg_mr,
+ .destroy_cq = hns_roce_v1_destroy_cq,
.priv = &hr_v1_priv,
};
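
A note on the destroy-QP path added above: when the doorbell drain does not finish within the timeout, the final free is handed to a workqueue item that re-checks and requeues itself until the hardware is done. The stand-alone C sketch below models only that shape; hw_doorbells_drained(), free_qp_resources() and the 20 ms poll interval are illustrative stand-ins, not driver API.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls_left = 3;                  /* pretend hw finishes after 3 polls */

static bool hw_doorbells_drained(void)      /* illustrative stand-in */
{
	return --polls_left <= 0;
}

static void free_qp_resources(int qpn)      /* illustrative stand-in */
{
	printf("qp %d freed\n", qpn);
}

static void destroy_qp(int qpn, int timeout_ms)
{
	int waited = 0;

	while (!hw_doorbells_drained()) {
		if (waited >= timeout_ms) {
			/* real driver: queue_work() a worker that re-polls */
			printf("qp %d: deferring cleanup to a worker\n", qpn);
			return;
		}
		usleep(20 * 1000);          /* cf. HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS */
		waited += 20;
	}
	free_qp_resources(qpn);
}

int main(void)
{
	destroy_qp(42, 10000);
	return 0;
}
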
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 539b0a3b92b0..b213b5e6fef1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -58,6 +58,7 @@
#define HNS_ROCE_V1_PHY_UAR_NUM 8
#define HNS_ROCE_V1_GID_NUM 16
+#define HNS_ROCE_V1_RESV_QP 8
#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
@@ -102,8 +103,22 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_DB_WAIT_OK 0
+#define HNS_ROCE_V1_DB_STAGE1 1
+#define HNS_ROCE_V1_DB_STAGE2 2
+#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000
+#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20
+#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
+#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
+#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
+#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
+
#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
+#define HNS_ROCE_V1_TPTR_BUF_SIZE \
+ (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
+
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -140,6 +155,7 @@
#define SQ_PSN_SHIFT 8
#define QKEY_VAL 0x80010000
#define SDB_INV_CNT_OFFSET 8
+#define SDB_ST_CMP_VAL 8
struct hns_roce_cq_context {
u32 cqc_byte_4;
@@ -436,6 +452,8 @@ struct hns_roce_ud_send_wqe {
#define UD_SEND_WQE_U32_8_DMAC_5_M \
(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
+#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
+
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
@@ -480,13 +498,17 @@ struct hns_roce_sqp_context {
u32 qp1c_bytes_12;
u32 qp1c_bytes_16;
u32 qp1c_bytes_20;
- u32 qp1c_bytes_28;
u32 cur_rq_wqe_ba_l;
+ u32 qp1c_bytes_28;
u32 qp1c_bytes_32;
u32 cur_sq_wqe_ba_l;
u32 qp1c_bytes_40;
};
+#define QP1C_BYTES_4_QP_STATE_S 0
+#define QP1C_BYTES_4_QP_STATE_M \
+ (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
+
#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
@@ -952,6 +974,10 @@ struct hns_roce_sq_db {
#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
+#define SQ_DOORBELL_U32_4_SL_S 16
+#define SQ_DOORBELL_U32_4_SL_M \
+ (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
+
#define SQ_DOORBELL_U32_4_PORT_S 18
#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
@@ -979,12 +1005,58 @@ struct hns_roce_bt_table {
struct hns_roce_buf_list cqc_buf;
};
+struct hns_roce_tptr_table {
+ struct hns_roce_buf_list tptr_buf;
+};
+
+struct hns_roce_qp_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct hns_roce_qp *qp;
+ u32 db_wait_stage;
+ u32 sdb_issue_ptr;
+ u32 sdb_inv_cnt;
+ u32 sche_cnt;
+};
+
+struct hns_roce_des_qp {
+ struct workqueue_struct *qp_wq;
+ int requeue_flag;
+};
+
+struct hns_roce_mr_free_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct completion *comp;
+ int comp_flag;
+ void *mr;
+};
+
+struct hns_roce_recreate_lp_qp_work {
+ struct work_struct work;
+ struct ib_device *ib_dev;
+ struct completion *comp;
+ int comp_flag;
+};
+
+struct hns_roce_free_mr {
+ struct workqueue_struct *free_mr_wq;
+ struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_cq *mr_free_cq;
+ struct hns_roce_pd *mr_free_pd;
+};
+
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
struct hns_roce_bt_table bt_table;
+ struct hns_roce_tptr_table tptr_table;
+ struct hns_roce_des_qp des_qp;
+ struct hns_roce_free_mr free_mr;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
#endif
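
SDB_ST_CMP_VAL and the ROCEE_SDB_* counters introduced above are compared with wrap-around in mind (see roce_hw_index_cmp_lt in the .c hunks). One common way to implement such a wrap-safe "a is behind b" test for counters that wrap at 2^bits is sketched below; the driver's actual macro may differ in detail.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Treat a and b as unsigned counters that wrap at 2^bits and decide
 * whether a is "behind" b, i.e. fewer than half the ring apart. */
static bool hw_index_lt(uint32_t a, uint32_t b, unsigned int bits)
{
	uint32_t mask = (bits >= 32) ? 0xffffffffu : ((1u << bits) - 1);
	uint32_t diff = (b - a) & mask;

	return diff != 0 && diff < (mask / 2 + 1);
}

int main(void)
{
	assert(hw_index_lt(5, 9, 16));           /* 5 is behind 9 */
	assert(!hw_index_lt(9, 5, 16));
	assert(hw_index_lt(0xfffe, 0x0002, 16)); /* wrapped around */
	return 0;
}
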
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 764e35a54457..4953d9cb83a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -35,52 +35,13 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
/**
- * hns_roce_addrconf_ifid_eui48 - Get default gid.
- * @eui: eui.
- * @vlan_id: gid
- * @dev: net device
- * Description:
- * MAC convert to GID
- * gid[0..7] = fe80 0000 0000 0000
- * gid[8] = mac[0] ^ 2
- * gid[9] = mac[1]
- * gid[10] = mac[2]
- * gid[11] = ff (VLAN ID high byte (4 MS bits))
- * gid[12] = fe (VLAN ID low byte)
- * gid[13] = mac[3]
- * gid[14] = mac[4]
- * gid[15] = mac[5]
- */
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
- struct net_device *dev)
-{
- memcpy(eui, dev->dev_addr, 3);
- memcpy(eui + 5, dev->dev_addr + 3, 3);
- if (vlan_id < 0x1000) {
- eui[3] = vlan_id >> 8;
- eui[4] = vlan_id & 0xff;
- } else {
- eui[3] = 0xff;
- eui[4] = 0xfe;
- }
- eui[0] ^= 2;
-}
-
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
- memset(gid, 0, sizeof(*gid));
- gid->raw[0] = 0xFE;
- gid->raw[1] = 0x80;
- hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
-/**
* hns_get_gid_index - Get gid index.
* @hr_dev: pointer to structure hns_roce_dev.
* @port: port, value range: 0 ~ MAX
@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
return gid_index * hr_dev->caps.num_ports + port;
}
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u8 gid_idx = 0;
-
- if (gid_index >= hr_dev->caps.gid_table_len[port]) {
- dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
- gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
- return -EINVAL;
- }
-
- gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
- if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
- return -EINVAL;
-
- memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-
- hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-
- return 0;
-}
-
static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
@@ -135,27 +72,44 @@ static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
{
- u8 phy_port = hr_dev->iboe.phy_port[port];
- enum ib_mtu tmp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ u8 port = port_num - 1;
+ unsigned long flags;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- tmp = iboe_get_mtu(mtu);
- if (!tmp)
- tmp = IB_MTU_256;
+ hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
- hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
}
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
{
- struct ib_event event;
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ union ib_gid zgid = { {0} };
+ u8 port = port_num - 1;
+ unsigned long flags;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- /* Refresh gid in ib_cache */
- event.device = &hr_dev->ib_dev;
- event.element.port_num = port + 1;
- event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&event);
+ hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
}
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
@@ -163,9 +117,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
{
struct device *dev = &hr_dev->pdev->dev;
struct net_device *netdev;
- unsigned long flags;
- union ib_gid gid;
- int ret = 0;
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
@@ -173,7 +124,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+ spin_lock_bh(&hr_dev->iboe.lock);
switch (event) {
case NETDEV_UP:
@@ -181,23 +132,19 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
- hns_roce_make_default_gid(netdev, &gid);
- ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
- if (!ret)
- hns_roce_update_gids(hr_dev, port);
break;
case NETDEV_DOWN:
/*
- * In v1 engine, only support all ports closed together.
- */
+ * In v1 engine, only support all ports closed together.
+ */
break;
default:
dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
break;
}
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return ret;
+ spin_unlock_bh(&hr_dev->iboe.lock);
+ return 0;
}
static int hns_roce_netdev_event(struct notifier_block *self,
@@ -224,118 +171,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
return NOTIFY_DONE;
}
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
- struct hns_roce_dev *hr_dev, union ib_gid *gid)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_ib_iboe *iboe = NULL;
- int gid_table_len = 0;
- unsigned long flags;
- union ib_gid zgid;
- u8 gid_idx = 0;
- u8 port = 0;
- int i = 0;
- int free;
- struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
- rdma_vlan_dev_real_dev(event_netdev) :
- event_netdev;
-
- if (event != NETDEV_UP && event != NETDEV_DOWN)
- return;
-
- iboe = &hr_dev->iboe;
- while (port < hr_dev->caps.num_ports) {
- if (real_dev == iboe->netdevs[port])
- break;
- port++;
- }
-
- if (port >= hr_dev->caps.num_ports) {
- dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
- return;
- }
-
- memset(zgid.raw, 0, sizeof(zgid.raw));
- free = -1;
- gid_table_len = hr_dev->caps.gid_table_len[port];
-
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
- for (i = 0; i < gid_table_len; i++) {
- gid_idx = hns_get_gid_index(hr_dev, port, i);
- if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
- sizeof(gid->raw)))
- break;
- if (free < 0 && !memcmp(zgid.raw,
- iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
- free = i;
- }
-
- if (i >= gid_table_len) {
- if (free < 0) {
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_dbg(&hr_dev->pdev->dev,
- "gid_index overflow, port(%d)\n", port);
- return;
- }
- if (!hns_roce_set_gid(hr_dev, port, free, gid))
- hns_roce_update_gids(hr_dev, port);
- } else if (event == NETDEV_DOWN) {
- if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
- hns_roce_update_gids(hr_dev, port);
- }
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
- void *ptr)
-{
- struct in_ifaddr *ifa = ptr;
- struct hns_roce_dev *hr_dev;
- struct net_device *dev = ifa->ifa_dev->dev;
- union ib_gid gid;
-
- ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
- hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-
- hns_roce_addr_event(event, dev, hr_dev, &gid);
-
- return NOTIFY_DONE;
-}
-
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
-{
- struct in_ifaddr *ifa_list = NULL;
- union ib_gid gid = {{0} };
- u32 ipaddr = 0;
- int index = 0;
- int ret = 0;
- u8 i = 0;
+ u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- hns_roce_set_mtu(hr_dev, i,
- ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+ hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+ hr_dev->caps.max_mtu);
hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-
- if (hr_dev->iboe.netdevs[i]->ip_ptr) {
- ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
- index = 1;
- while (ifa_list) {
- ipaddr = ifa_list->ifa_address;
- ipv6_addr_set_v4mapped(ipaddr,
- (struct in6_addr *)&gid);
- ret = hns_roce_set_gid(hr_dev, i, index, &gid);
- if (ret)
- break;
- index++;
- ifa_list = ifa_list->ifa_next;
- }
- hns_roce_update_gids(hr_dev, i);
- }
}
- return ret;
+ return 0;
}
static int hns_roce_query_device(struct ib_device *ib_dev,
@@ -444,31 +290,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
- u8 gid_idx = 0;
- u8 port;
-
- if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
- index >= hr_dev->caps.gid_table_len[port_num - 1]) {
- dev_err(dev,
- "port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
- port_num, index, hr_dev->caps.num_ports,
- hr_dev->caps.gid_table_len[port_num - 1] - 1);
- return -EINVAL;
- }
-
- port = port_num - 1;
- gid_idx = hns_get_gid_index(hr_dev, port, index);
- if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
- dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
- port_num, index, HNS_ROCE_MAX_GID_NUM);
- return -EINVAL;
- }
-
- memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
- HNS_ROCE_GID_SIZE);
-
return 0;
}
@@ -549,6 +370,8 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
+
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL;
@@ -558,10 +381,15 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
-
- } else {
+ } else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ /* vm_pgoff: 1 -- TPTR */
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+ hr_dev->tptr_size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ } else
return -EINVAL;
- }
return 0;
}
@@ -605,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
- strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+ strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -639,6 +467,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
+ ib_dev->add_gid = hns_roce_add_gid;
+ ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
@@ -681,32 +511,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
return ret;
}
- ret = hns_roce_setup_mtu_gids(hr_dev);
+ ret = hns_roce_setup_mtu_mac(hr_dev);
if (ret) {
- dev_err(dev, "roce_setup_mtu_gids failed!\n");
- goto error_failed_setup_mtu_gids;
+ dev_err(dev, "setup_mtu_mac failed!\n");
+ goto error_failed_setup_mtu_mac;
}
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
dev_err(dev, "register_netdevice_notifier failed!\n");
- goto error_failed_setup_mtu_gids;
- }
-
- iboe->nb_inet.notifier_call = hns_roce_inet_event;
- ret = register_inetaddr_notifier(&iboe->nb_inet);
- if (ret) {
- dev_err(dev, "register inet addr notifier failed!\n");
- goto error_failed_register_inetaddr_notifier;
+ goto error_failed_setup_mtu_mac;
}
return 0;
-error_failed_register_inetaddr_notifier:
- unregister_netdevice_notifier(&iboe->nb);
-
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
ib_unregister_device(ib_dev);
return ret;
@@ -940,10 +760,10 @@ err_unmap_mtt:
}
/**
-* hns_roce_setup_hca - setup host channel adapter
-* @hr_dev: pointer to hns roce device
-* Return : int
-*/
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return : int
+ */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -1008,11 +828,11 @@ err_uar_table_free:
}
/**
-* hns_roce_probe - RoCE driver entrance
-* @pdev: pointer to platform device
-* Return : int
-*
-*/
+ * hns_roce_probe - RoCE driver entrance
+ * @pdev: pointer to platform device
+ * Return : int
+ *
+ */
static int hns_roce_probe(struct platform_device *pdev)
{
int ret;
@@ -1023,9 +843,6 @@ static int hns_roce_probe(struct platform_device *pdev)
if (!hr_dev)
return -ENOMEM;
- memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
- sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
-
hr_dev->pdev = pdev;
platform_set_drvdata(pdev, hr_dev);
@@ -1125,9 +942,9 @@ error_failed_get_cfg:
}
/**
-* hns_roce_remove - remove RoCE device
-* @pdev: pointer to platform device
-*/
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
static int hns_roce_remove(struct platform_device *pdev)
{
struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
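
The hns_roce_mmap() change above introduces a second page offset: offset 0 still maps the UAR, and on v1 hardware page offset 1 maps the TPTR area. A hypothetical user-space counterpart could look like the fragment below; the assumption that fd is the verbs command fd for the device, and the helper name itself, are illustrative rather than a published ABI.

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static int map_driver_pages(int fd, void **uar, void **tptr)
{
	long page = sysconf(_SC_PAGESIZE);

	/* page offset 0: UAR doorbell page */
	*uar = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (*uar == MAP_FAILED)
		return -1;

	/* page offset 1: TPTR area (vm_pgoff == 1 in the kernel) */
	*tptr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
		     fd, 1 * page);
	if (*tptr == MAP_FAILED) {
		munmap(*uar, page);
		*uar = NULL;
		return -1;
	}
	return 0;
}
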
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index fb87883ead34..4139abee3b54 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -42,7 +42,7 @@ static u32 hw_index_to_key(unsigned long ind)
return (u32)(ind >> 24) | (ind << 8);
}
-static unsigned long key_to_hw_index(u32 key)
+unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
@@ -53,16 +53,16 @@ static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
HNS_ROCE_CMD_SW2HW_MPT,
- HNS_ROCE_CMD_TIME_CLASS_B);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
- HNS_ROCE_CMD_TIME_CLASS_B);
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
@@ -137,11 +137,13 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
- if (!buddy->bits[i])
- goto err_out_free;
-
- bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
+ __GFP_NOWARN);
+ if (!buddy->bits[i]) {
+ buddy->bits[i] = vzalloc(s * sizeof(long));
+ if (!buddy->bits[i])
+ goto err_out_free;
+ }
}
set_bit(0, buddy->bits[buddy->max_order]);
@@ -151,7 +153,7 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -164,7 +166,7 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -287,7 +289,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key));
+ key_to_hw_index(mr->key), BITMAP_NO_RR);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -605,13 +607,20 @@ err_free:
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ int ret = 0;
- hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
+ if (hr_dev->hw->dereg_mr) {
+ ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+ } else {
+ hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
- return 0;
+ kfree(mr);
+ }
+
+ return ret;
}
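
key_to_hw_index()/hw_index_to_key() above are simple byte rotations between the MPT index and the key value; dropping the static qualifier lets the new dereg_mr hook recover the index from a key. Modeled with fixed 32-bit types, the pair round-trips, as this small self-contained check demonstrates.

#include <assert.h>
#include <stdint.h>

/* Same byte rotation hns_roce uses between the MPT index and the key
 * (modeled here with fixed 32-bit types). */
static uint32_t hw_index_to_key(uint32_t ind) { return (ind >> 24) | (ind << 8); }
static uint32_t key_to_hw_index(uint32_t key) { return (key << 24) | (key >> 8); }

int main(void)
{
	for (uint32_t i = 0; i < (1u << 20); i++)
		assert(key_to_hw_index(hw_index_to_key(i)) == i);
	return 0;
}
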
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 05db7d59812a..a64500fa1145 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -40,7 +40,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
- hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+ hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
@@ -121,7 +121,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+ BITMAP_NO_RR);
}
int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e86dd8d06777..f036f32f15d3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -37,7 +37,7 @@
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
@@ -250,7 +250,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
if (base_qpn < SQP_NUM)
return;
- hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+ hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
deleted file mode 100644
index a28f761a9f65..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_user.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_USER_H
-#define _HNS_ROCE_USER_H
-
-struct hns_roce_ib_create_cq {
- __u64 buf_addr;
-};
-
-struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u8 log_sq_bb_count;
- __u8 log_sq_stride;
- __u8 sq_no_prefetch;
- __u8 reserved[5];
-};
-
-struct hns_roce_ib_alloc_ucontext_resp {
- __u32 qp_tab_size;
-};
-
-#endif /*_HNS_ROCE_USER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8ec09e470f84..da2eb5a281fa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -112,9 +112,12 @@
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define IW_CFG_FPM_QP_COUNT 32768
-#define I40IW_MAX_PAGES_PER_FMR 512
-#define I40IW_MIN_PAGES_PER_FMR 1
+#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
+#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
+#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
@@ -210,6 +213,12 @@ struct i40iw_msix_vector {
u32 ceq_id;
};
+struct l2params_work {
+ struct work_struct work;
+ struct i40iw_device *iwdev;
+ struct i40iw_l2params l2params;
+};
+
#define I40IW_MSIX_TABLE_SIZE 65
struct virtchnl_work {
@@ -227,6 +236,7 @@ struct i40iw_device {
struct net_device *netdev;
wait_queue_head_t vchnl_waitq;
struct i40iw_sc_dev sc_dev;
+ struct i40iw_sc_vsi vsi;
struct i40iw_handler *hdl;
struct i40e_info *ldev;
struct i40e_client *client;
@@ -280,7 +290,6 @@ struct i40iw_device {
u32 sd_type;
struct workqueue_struct *param_wq;
atomic_t params_busy;
- u32 mss;
enum init_completion_state init_state;
u16 mac_ip_table_idx;
atomic_t vchnl_msgs;
@@ -297,6 +306,14 @@ struct i40iw_device {
u32 mr_stagmask;
u32 mpa_version;
bool dcb;
+ bool closing;
+ bool reset;
+ u32 used_pds;
+ u32 used_cqs;
+ u32 used_mrs;
+ u32 used_qps;
+ wait_queue_head_t close_wq;
+ atomic64_t use_count;
};
struct i40iw_ib_device {
@@ -498,7 +515,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
-int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
struct sk_buff *);
@@ -508,20 +525,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_rem_devusecount(struct i40iw_device *iwdev);
+void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
struct i40iw_modify_qp_info *info, bool wait);
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_qp *qp,
+ bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait);
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num);
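
The new use_count/close_wq pair in struct i40iw_device lets the close path wait until every outstanding user has dropped its reference. A user-space analogue of that pairing, using a mutex and condition variable in place of atomic64_t and a wait queue, is sketched below; all names are illustrative.

#include <pthread.h>
#include <stdint.h>

struct dev_usage {
	pthread_mutex_t lock;
	pthread_cond_t  idle;
	uint64_t        use_count;
};

static struct dev_usage dev = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.idle = PTHREAD_COND_INITIALIZER,
};

static void dev_get(struct dev_usage *d)
{
	pthread_mutex_lock(&d->lock);
	d->use_count++;
	pthread_mutex_unlock(&d->lock);
}

static void dev_put(struct dev_usage *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->use_count == 0)
		pthread_cond_broadcast(&d->idle);
	pthread_mutex_unlock(&d->lock);
}

static void dev_wait_until_idle(struct dev_usage *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->use_count)
		pthread_cond_wait(&d->idle, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	dev_get(&dev);
	dev_put(&dev);
	dev_wait_until_idle(&dev);	/* returns immediately: count is 0 */
	return 0;
}
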
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 85637696f6e9..95a0586a4da8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -68,13 +68,13 @@ static void i40iw_disconnect_worker(struct work_struct *work);
/**
* i40iw_free_sqbuf - put back puda buffer if refcount = 0
- * @dev: FPK device
+ * @vsi: pointer to vsi structure
* @buf: puda buffer to free
*/
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
- struct i40iw_puda_rsrc *ilq = dev->ilq;
+ struct i40iw_puda_rsrc *ilq = vsi->ilq;
if (!atomic_dec_return(&buf->refcount))
i40iw_puda_ret_bufpool(ilq, buf);
@@ -221,6 +221,7 @@ static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
cm_info->loc_port = cm_node->loc_port;
cm_info->rem_port = cm_node->rem_port;
+ cm_info->user_pri = cm_node->user_pri;
}
/**
@@ -271,6 +272,7 @@ static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
event.provider_data = (void *)cm_node;
event.private_data = (void *)cm_node->pdata_buf;
event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
break;
case IW_CM_EVENT_CONNECT_REPLY:
i40iw_get_cmevent_info(cm_node, cm_id, &event);
@@ -335,13 +337,13 @@ static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
*/
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_device *iwdev = cm_node->iwdev;
struct i40iw_timer_entry *send_entry;
send_entry = cm_node->send_entry;
if (send_entry) {
cm_node->send_entry = NULL;
- i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+ i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
kfree(send_entry);
atomic_dec(&cm_node->ref_count);
}
@@ -360,15 +362,6 @@ static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
-static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
-{
- if ((cm_node->rem_mac[0] == 0x0) &&
- (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
- ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
- return true;
- return false;
-}
-
/**
* i40iw_form_cm_frame - get a free packet and build frame
* @cm_node: connection's node ionfo to use in frame
@@ -384,7 +377,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
u8 flags)
{
struct i40iw_puda_buf *sqbuf;
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
u8 *buf;
struct tcphdr *tcph;
@@ -396,8 +389,9 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
u32 opts_len = 0;
u32 pd_len = 0;
u32 hdr_len = 0;
+ u16 vtag;
- sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+ sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
if (!sqbuf)
return NULL;
buf = sqbuf->mem.va;
@@ -408,11 +402,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
if (hdr)
hdr_len = hdr->size;
- if (pdata) {
+ if (pdata)
pd_len = pdata->size;
- if (!is_remote_ne020_or_chelsio(cm_node))
- pd_len += MPA_ZERO_PAD_LEN;
- }
if (cm_node->vlan_id < VLAN_TAG_PRESENT)
eth_hlen += 4;
@@ -445,7 +436,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
} else {
@@ -454,7 +446,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
iph->version = IPVERSION;
iph->ihl = 5; /* 5 * 4Byte words, IP headr len */
- iph->tos = 0;
+ iph->tos = cm_node->tos;
iph->tot_len = htons(packetsize);
iph->id = htons(++cm_node->tcp_cntxt.loc_id);
@@ -474,13 +466,15 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
- ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
} else {
ethh->h_proto = htons(ETH_P_IPV6);
}
ip6h->version = 6;
- ip6h->flow_lbl[0] = 0;
+ ip6h->priority = cm_node->tos >> 4;
+ ip6h->flow_lbl[0] = cm_node->tos << 4;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
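
The hunks above start carrying the user priority into the frame: the 802.1Q TCI is now built from the priority (PCP) and the VLAN id instead of the VLAN id alone, and the IPv4 ToS / IPv6 traffic class is taken from the cm_node (rt_tos2priority() derives the priority from the ToS elsewhere in this patch). The packing itself is plain bit arithmetic, shown by this stand-alone check; the constants mirror the kernel's VLAN_* definitions.

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff
#define VLAN_PRIO_MASK  0xe000

/* Pack priority (PCP, 3 bits) and VLAN id (12 bits) into the 16-bit TCI,
 * the same composition the driver writes into h_vlan_TCI. */
static uint16_t build_vlan_tci(uint8_t prio, uint16_t vid)
{
	return (uint16_t)(prio << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);
}

int main(void)
{
	uint16_t tci = build_vlan_tci(5, 100);

	assert(((tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) == 5);
	assert((tci & VLAN_VID_MASK) == 100);
	return 0;
}
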
@@ -1065,7 +1059,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
int send_retrans,
int close_when_complete)
{
- struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
struct i40iw_cm_core *cm_core = cm_node->cm_core;
struct i40iw_timer_entry *new_send;
int ret = 0;
@@ -1074,7 +1068,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send) {
- i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_free_sqbuf(vsi, (void *)sqbuf);
return -ENOMEM;
}
new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1089,7 +1083,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
- i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_free_sqbuf(vsi, (void *)sqbuf);
i40iw_pr_err("already close entry\n");
return -EINVAL;
}
@@ -1104,7 +1098,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
atomic_inc(&sqbuf->refcount);
- i40iw_puda_send_buf(dev->ilq, sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, sqbuf);
if (!send_retrans) {
i40iw_cleanup_retrans_entry(cm_node);
if (close_when_complete)
@@ -1201,6 +1195,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
struct i40iw_cm_node *cm_node;
struct i40iw_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
+ struct i40iw_sc_vsi *vsi;
struct list_head *list_node;
struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
u32 settimer = 0;
@@ -1276,9 +1271,10 @@ static void i40iw_cm_timer_tick(unsigned long pass)
cm_node->cm_core->stats_pkt_retrans++;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ vsi = &cm_node->iwdev->vsi;
dev = cm_node->dev;
atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
@@ -1379,10 +1375,11 @@ int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
struct i40iw_puda_buf *sqbuf;
+ struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
if (sqbuf)
- i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+ i40iw_puda_send_buf(vsi->ilq, sqbuf);
else
i40iw_pr_err("no sqbuf\n");
}
@@ -1564,9 +1561,15 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
cm_info->vlan_id = child_listen_node->vlan_id;
- ret = i40iw_manage_qhash(iwdev, cm_info,
- I40IW_QHASH_TYPE_TCP_SYN,
- I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ if (child_listen_node->qhash_set) {
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = I40IW_SUCCESS;
+ }
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"freed pointer = %p\n",
@@ -1591,9 +1594,10 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct net_device *ip_dev = NULL;
-#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr laddr6;
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return NULL;
i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
if (vlan_id)
*vlan_id = I40IW_NO_VLAN;
@@ -1610,7 +1614,6 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
}
}
rcu_read_unlock();
-#endif
return ip_dev;
}
@@ -1646,7 +1649,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
{
struct net_device *ip_dev;
struct inet6_dev *idev;
- struct inet6_ifaddr *ifp;
+ struct inet6_ifaddr *ifp, *tmp;
enum i40iw_status_code ret = 0;
struct i40iw_cm_listener *child_listen_node;
unsigned long flags;
@@ -1661,7 +1664,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
i40iw_pr_err("idev == NULL\n");
break;
}
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"IP=%pI6, vlan_id=%d, MAC=%pM\n",
@@ -1675,7 +1678,6 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
- i40iw_pr_err("listener memory allocation\n");
ret = I40IW_ERR_NO_MEMORY;
goto exit;
}
@@ -1695,6 +1697,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
I40IW_QHASH_MANAGE_TYPE_ADD,
NULL, true);
if (!ret) {
+ child_listen_node->qhash_set = true;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_add(&child_listen_node->child_listen_list,
&cm_parent_listen_node->child_listen_list);
@@ -1751,7 +1754,6 @@ static enum i40iw_status_code i40iw_add_mqh_4(
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
- i40iw_pr_err("listener memory allocation\n");
in_dev_put(idev);
ret = I40IW_ERR_NO_MEMORY;
goto exit;
@@ -1773,6 +1775,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
NULL,
true);
if (!ret) {
+ child_listen_node->qhash_set = true;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_add(&child_listen_node->child_listen_list,
&cm_parent_listen_node->child_listen_list);
@@ -1880,6 +1883,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
nfo.loc_port = listener->loc_port;
nfo.ipv4 = listener->ipv4;
nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
if (!list_empty(&listener->child_listen_list)) {
i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
@@ -2138,6 +2142,20 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
/* set our node specific transport info */
cm_node->ipv4 = cm_info->ipv4;
cm_node->vlan_id = cm_info->vlan_id;
+ if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
+ cm_node->tos, cm_node->user_pri);
+ }
memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
cm_node->loc_port = cm_info->loc_port;
@@ -2162,7 +2180,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = iwdev->mss;
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
@@ -2236,7 +2254,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
- cm_node->apbvt_set && cm_node->iwdev) {
+ cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2861,7 +2879,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
@@ -2874,7 +2892,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->vlan_id,
I40IW_CM_LISTENER_ACTIVE_STATE);
if (!loopback_remotelistener) {
- i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ i40iw_rem_ref_cm_node(cm_node);
+ return ERR_PTR(-ECONNREFUSED);
} else {
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
@@ -2887,7 +2906,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
loopback_remotelistener);
if (!loopback_remotenode) {
i40iw_rem_ref_cm_node(cm_node);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
cm_core->stats_loopbacks++;
loopback_remotenode->loopbackpartner = cm_node;
@@ -3041,10 +3060,10 @@ static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
/**
* i40iw_receive_ilq - recv an ETHERNET packet, and process it
* through CM
- * @dev: FPK dev struct
+ * @vsi: pointer to the vsi structure
* @rbuf: receive buffer
*/
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
struct i40iw_cm_node *cm_node;
struct i40iw_cm_listener *listener;
@@ -3052,9 +3071,11 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
struct i40iw_cm_info cm_info;
+ struct i40iw_sc_dev *dev = vsi->dev;
struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct vlan_ethhdr *ethh;
+ u16 vtag;
/* if vlan, then maclen = 18 else 14 */
iph = (struct iphdr *)rbuf->iph;
@@ -3068,7 +3089,9 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
ethh = (struct vlan_ethhdr *)rbuf->mem.va;
if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
- cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+ vtag = ntohs(ethh->h_vlan_TCI);
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
i40iw_debug(cm_core->dev,
I40IW_DEBUG_CM,
"%s vlan_id=%d\n",
@@ -3083,6 +3106,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
cm_info.loc_addr[0] = ntohl(iph->daddr);
cm_info.rem_addr[0] = ntohl(iph->saddr);
cm_info.ipv4 = true;
+ cm_info.tos = iph->tos;
} else {
ip6h = (struct ipv6hdr *)rbuf->iph;
i40iw_copy_ip_ntohl(cm_info.loc_addr,
@@ -3090,6 +3114,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
i40iw_copy_ip_ntohl(cm_info.rem_addr,
ip6h->saddr.in6_u.u6_addr32);
cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
}
cm_info.loc_port = ntohs(tcph->dest);
cm_info.rem_port = ntohs(tcph->source);
@@ -3309,6 +3334,8 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
ctx_info->tcp_info_valid = true;
ctx_info->iwarp_info_valid = true;
+ ctx_info->add_to_qoslist = true;
+ ctx_info->user_pri = cm_node->user_pri;
i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
if (cm_node->snd_mark_en) {
@@ -3320,33 +3347,47 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
cm_node->state = I40IW_CM_STATE_OFFLOADED;
tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+ tcp_info.tos = cm_node->tos;
dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
/* once tcp_info is set, no need to do it again */
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = false;
+ ctx_info->add_to_qoslist = false;
}
/**
* i40iw_cm_disconn - when a connection is being closed
* @iwqp: associate qp for the connection
*/
-int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
struct disconn_work *work;
struct i40iw_device *iwdev = iwqp->iwdev;
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
- return -ENOMEM; /* Timer will clean up */
-
+ return; /* Timer will clean up */
+
+ spin_lock_irqsave(&iwdev->qptable_lock, flags);
+ if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "%s qp_id %d is already freed\n",
+ __func__, iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
i40iw_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+
work->iwqp = iwqp;
INIT_WORK(&work->work, i40iw_disconnect_worker);
queue_work(cm_core->disconn_wq, &work->work);
- return 0;
+ return;
}
/**
@@ -3432,7 +3473,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
*terminate-handler to issue cm_disconn which can re-free
*a QP even after its refcnt=0.
*/
- del_timer(&iwqp->terminate_timer);
+ i40iw_terminate_del_timer(qp);
if (!iwqp->flush_issued) {
iwqp->flush_issued = 1;
issue_flush = 1;
@@ -3462,7 +3503,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
/* Flush the queues */
i40iw_flush_wqes(iwdev, iwqp);
- if (qp->term_flags) {
+ if (qp->term_flags && iwqp->ibqp.event_handler) {
ibevent.device = iwqp->ibqp.device;
ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
@@ -3571,7 +3612,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->cm_node = (void *)cm_node;
cm_node->iwqp = iwqp;
- buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+ buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
@@ -3605,18 +3646,10 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->lsmm_mr = ibmr;
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- if (is_remote_ne020_or_chelsio(cm_node))
- dev->iw_priv_qp_ops->qp_send_lsmm(
- &iwqp->sc_qp,
+ dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
iwqp->ietf_mem.va,
(accept.size + conn_param->private_data_len),
ibmr->lkey);
- else
- dev->iw_priv_qp_ops->qp_send_lsmm(
- &iwqp->sc_qp,
- iwqp->ietf_mem.va,
- (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
- ibmr->lkey);
} else {
if (iwqp->page)
@@ -3714,6 +3747,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in6 *raddr6;
bool qhash_set = false;
int apbvt_set = 0;
+ int err = 0;
enum i40iw_status_code status;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
@@ -3759,6 +3793,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
}
cm_info.cm_id = cm_id;
+ cm_info.tos = cm_id->tos;
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
+ __func__, cm_id->tos, cm_info.user_pri);
if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
(!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
raddr6->sin6_addr.in6_u.u6_addr32,
@@ -3790,8 +3828,11 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
conn_param->private_data_len,
(void *)conn_param->private_data,
&cm_info);
- if (!cm_node)
- goto err;
+
+ if (IS_ERR(cm_node)) {
+ err = PTR_ERR(cm_node);
+ goto err_out;
+ }
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
@@ -3805,10 +3846,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->cm_id = cm_id;
i40iw_add_ref(&iwqp->ibqp);
- if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
- if (i40iw_send_syn(cm_node, 0)) {
+ if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ err = i40iw_send_syn(cm_node, 0);
+ if (err) {
i40iw_rem_ref_cm_node(cm_node);
- goto err;
+ goto err_out;
}
}
@@ -3820,24 +3863,25 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->cm_id);
return 0;
-err:
- if (cm_node) {
- if (cm_node->ipv4)
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI4",
- cm_node->rem_addr);
- else
- i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI6",
- cm_node->rem_addr);
- }
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
+err_out:
+ if (cm_info.ipv4)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+
+ if (qhash_set)
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
cm_info.loc_port))
@@ -3846,7 +3890,7 @@ err:
I40IW_MANAGE_APBVT_DEL);
cm_id->rem_ref(cm_id);
iwdev->cm_core.stats_connect_errs++;
- return -ENOMEM;
+ return err;
}
/**
@@ -3904,6 +3948,10 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_listen_node;
+ cm_listen_node->tos = cm_id->tos;
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
+
if (!cm_listen_node->reused_node) {
if (wildcard) {
if (cm_info.ipv4)
@@ -4124,3 +4172,158 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}
+
+/**
+ * i40iw_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP was found
+ * it will allocate and add a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
+ struct i40iw_cm_listener *parent_listen_node,
+ struct i40iw_cm_info *nfo,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+ struct i40iw_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ enum i40iw_status_code ret;
+ bool node_allocated = false;
+ enum i40iw_quad_hash_manage_type op =
+ ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+ list_for_each_safe(pos, tpos, child_listen_list) {
+ child_listen_node =
+ list_entry(pos,
+ struct i40iw_cm_listener,
+ child_listen_list);
+ if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+ goto set_qhash;
+ }
+
+ /* if not found then add a child listener if interface is going up */
+ if (!ifup)
+ return;
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node)
+ return;
+ node_allocated = true;
+ memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
+
+ memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+ memcpy(nfo->loc_addr,
+ child_listen_node->loc_addr,
+ sizeof(nfo->loc_addr));
+ nfo->vlan_id = child_listen_node->vlan_id;
+ ret = i40iw_manage_qhash(iwdev, nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ op,
+ NULL, false);
+ if (!ret) {
+ child_listen_node->qhash_set = ifup;
+ if (node_allocated)
+ list_add(&child_listen_node->child_listen_list,
+ &parent_listen_node->child_listen_list);
+ } else if (node_allocated) {
+ kfree(child_listen_node);
+ }
+}
+
+/**
+ * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * @iwdev: device pointer
+ */
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct i40iw_cm_node *cm_node;
+ unsigned long flags;
+ struct list_head connected_list;
+ struct ib_qp_attr attr;
+
+ INIT_LIST_HEAD(&connected_list);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, list);
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
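+	/* refs taken above keep the nodes valid now that ht_lock is released */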
+ list_for_each_safe(list_node, list_core_temp, &connected_list) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
+ attr.qp_state = IB_QPS_ERR;
+ i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+}
+
+/**
+ * i40iw_if_notify - process an ifup or ifdown on an interface
+ * @iwdev: device pointer
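+ * @netdev: network device pointer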
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+ struct i40iw_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ struct i40iw_cm_info nfo;
+ u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ enum i40iw_status_code ret;
+ enum i40iw_quad_hash_manage_type op =
+ ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+ /* Disable or enable qhash for listeners */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (vlan_id == listen_node->vlan_id &&
+ (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
+ !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
+ memcpy(nfo.loc_addr, listen_node->loc_addr,
+ sizeof(nfo.loc_addr));
+ nfo.loc_port = listen_node->loc_port;
+ nfo.ipv4 = listen_node->ipv4;
+ nfo.vlan_id = listen_node->vlan_id;
+ nfo.user_pri = listen_node->user_pri;
+ if (!list_empty(&listen_node->child_listen_list)) {
+ i40iw_qhash_ctrl(iwdev,
+ listen_node,
+ &nfo,
+ ipaddr, ipv4, ifup);
+ } else if (memcmp(listen_node->loc_addr, ip_zero,
+ ipv4 ? 4 : 16)) {
+ ret = i40iw_manage_qhash(iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ op,
+ NULL,
+ false);
+ if (!ret)
+ listen_node->qhash_set = ifup;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ /* disconnect any connected qp's on ifdown */
+ if (!ifup)
+ i40iw_cm_disconnect_all(iwdev);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index e9046d9f9645..2e52e38ffcf3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -56,8 +56,6 @@
#define I40IW_MAX_IETF_SIZE 32
-#define MPA_ZERO_PAD_LEN 4
-
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
@@ -299,6 +297,7 @@ struct i40iw_cm_listener {
enum i40iw_cm_listener_state listener_state;
u32 reused_node;
u8 user_pri;
+ u8 tos;
u16 vlan_id;
bool qhash_set;
bool ipv4;
@@ -341,9 +340,11 @@ struct i40iw_cm_node {
int accept_pend;
struct list_head timer_entry;
struct list_head reset_entry;
+ struct list_head connected_entry;
atomic_t passive_state;
bool qhash_set;
u8 user_pri;
+ u8 tos;
bool ipv4;
bool snd_mark_en;
u16 lsmm_size;
@@ -368,7 +369,8 @@ struct i40iw_cm_info {
u32 rem_addr[4];
u16 vlan_id;
int backlog;
- u16 user_pri;
+ u8 user_pri;
+ u8 tos;
bool ipv4;
};
@@ -445,4 +447,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
u8 *mac_addr,
u32 action);
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup);
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 2c4b4d072d6a..98923a8cf86d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -103,6 +103,7 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
if (newtail != tail) {
/* SUCCESS */
I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
return 0;
}
udelay(I40IW_SLEEP_COUNT);
@@ -223,17 +224,150 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
}
/**
+ * i40iw_fill_qos_list - Change all unknown qs handles to available ones
+ * @qs_list: list of qs_handles to be fixed with valid qs_handles
+ */
+static void i40iw_fill_qos_list(u16 *qs_list)
+{
+ u16 qshandle = qs_list[0];
+ int i;
+
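+	/* carry the most recently seen handle forward into any QS_HANDLE_UNKNOWN slots */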
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ if (qs_list[i] == QS_HANDLE_UNKNOWN)
+ qs_list[i] = qshandle;
+ else
+ qshandle = qs_list[i];
+ }
+}
+
+/**
+ * i40iw_qp_from_entry - Given a list entry, return the containing qp structure
+ * @entry: Points to list of qp structure
+ */
+static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
+{
+ if (!entry)
+ return NULL;
+
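+	/* open-coded container_of(entry, struct i40iw_sc_qp, list) */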
+ return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
+}
+
+/**
+ * i40iw_get_qp - get the next qp from the list given current qp
+ * @head: Listhead of qp's
+ * @qp: current qp
+ */
+static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
+{
+ struct list_head *entry = NULL;
+ struct list_head *lastentry;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = head->next;
+ } else {
+ lastentry = &qp->list;
+ entry = (lastentry != head) ? lastentry->next : NULL;
+ }
+
+ return i40iw_qp_from_entry(entry);
+}
+
+/**
+ * i40iw_change_l2params - given the new l2 parameters, update all qps
+ * @vsi: pointer to the vsi structure
+ * @l2params: New parameters from l2
+ */
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
+{
+ struct i40iw_sc_dev *dev = vsi->dev;
+ struct i40iw_sc_qp *qp = NULL;
+ bool qs_handle_change = false;
+ bool mss_change = false;
+ unsigned long flags;
+ u16 qs_handle;
+ int i;
+
+ if (vsi->mss != l2params->mss) {
+ mss_change = true;
+ vsi->mss = l2params->mss;
+ }
+
+ i40iw_fill_qos_list(l2params->qs_handle_list);
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ qs_handle = l2params->qs_handle_list[i];
+ if (vsi->qos[i].qs_handle != qs_handle)
+ qs_handle_change = true;
+ else if (!mss_change)
+ continue; /* no MSS nor qs handle change */
+ spin_lock_irqsave(&vsi->qos[i].lock, flags);
+ qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (mss_change)
+ i40iw_qp_mss_modify(dev, qp);
+ if (qs_handle_change) {
+ qp->qs_handle = qs_handle;
+ /* issue cqp suspend command */
+ i40iw_qp_suspend_resume(dev, qp, true);
+ }
+ qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+ }
+ spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
+ vsi->qos[i].qs_handle = qs_handle;
+ }
+}
+
+/**
+ * i40iw_qp_rem_qos - remove qp from the qos list during qp destroy
+ * @qp: qp to be removed from qos
+ */
+static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_sc_vsi *vsi = qp->vsi;
+ unsigned long flags;
+
+ if (!qp->on_qoslist)
+ return;
+ spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+ list_del(&qp->list);
+ spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
+ * i40iw_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_sc_vsi *vsi = qp->vsi;
+ unsigned long flags;
+
+ if (qp->on_qoslist)
+ return;
+ spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
* i40iw_sc_pd_init - initialize sc pd struct
* @dev: sc device struct
* @pd: sc pd ptr
* @pd_id: pd_id for allocated pd
+ * @abi_ver: ABI version from user context, -1 if not valid
*/
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
struct i40iw_sc_pd *pd,
- u16 pd_id)
+ u16 pd_id,
+ int abi_ver)
{
pd->size = sizeof(*pd);
pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
pd->dev = dev;
}
@@ -292,6 +426,9 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
info->dev->cqp = cqp;
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
+ cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
__func__, cqp->sq_size, cqp->hw_sq_size,
@@ -302,12 +439,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
/**
* i40iw_sc_cqp_create - create cqp during bringup
* @cqp: struct for cqp hw
- * @disable_pfpdus: if pfpdu to be disabled
* @maj_err: If error, major err number
* @min_err: If error, minor err number
*/
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
- bool disable_pfpdus,
u16 *maj_err,
u16 *min_err)
{
@@ -326,9 +461,6 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
- if (disable_pfpdus)
- temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
-
set_64bit_val(cqp->host_ctx, 0, temp);
set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
@@ -424,6 +556,7 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
return NULL;
}
I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
if (ret_code)
return NULL;
if (!wqe_idx)
@@ -559,6 +692,8 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
wmb(); /* write shadow area before tail */
I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
+
return ret_code;
}
@@ -1051,6 +1186,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
u64 qw1 = 0;
u64 qw2 = 0;
u64 temp;
+ struct i40iw_sc_vsi *vsi = info->vsi;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -1082,7 +1218,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
}
- qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+ qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
if (info->vlan_valid)
qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
set_64bit_val(wqe, 16, qw2);
@@ -2103,6 +2239,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
u32 offset;
qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
qp->sq_pa = info->sq_pa;
qp->rq_pa = info->rq_pa;
qp->hw_host_ctx_pa = info->host_ctx_pa;
@@ -2118,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
offset);
info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+ info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
if (ret_code)
return ret_code;
@@ -2136,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
__func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
- ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
- &wqe_size);
- if (ret_code)
- return ret_code;
+
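+	/* ABI v4 sizes RQ WQEs from the fragment count; later ABIs use the max RQ WQE size */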
+ switch (qp->pd->abi_ver) {
+ case 4:
+ ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ wqe_size = I40IW_MAX_WQE_SIZE_RQ;
+ break;
+ }
qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
@@ -2151,7 +2300,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rq_tph_en = info->rq_tph_en;
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
- qp->qs_handle = qp->pd->dev->qs_handle;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
@@ -2296,6 +2445,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
struct i40iw_sc_cqp *cqp;
u64 header;
+ i40iw_qp_rem_qos(qp);
cqp = qp->pd->dev->cqp;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -2443,10 +2593,20 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
{
struct i40iwarp_offload_info *iw;
struct i40iw_tcp_offload_info *tcp;
+ struct i40iw_sc_vsi *vsi;
+ struct i40iw_sc_dev *dev;
u64 qw0, qw3, qw7 = 0;
iw = info->iwarp_info;
tcp = info->tcp_info;
+ vsi = qp->vsi;
+ dev = qp->dev;
+ if (info->add_to_qoslist) {
+ qp->user_pri = info->user_pri;
+ i40iw_qp_add_qos(qp);
+ i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
+ __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
+ }
qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
@@ -2487,16 +2647,14 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
- set_64bit_val(qp_ctx, 144, qp->q2_pa);
+ set_64bit_val(qp_ctx,
+ 144,
+ LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
+ LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
set_64bit_val(qp_ctx,
152,
LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
- /*
- * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an
- *advertisable IRD of 64
- */
- iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
set_64bit_val(qp_ctx,
160,
LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
@@ -2507,6 +2665,9 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(iw->bind_en, I40IWQPC_BINDEN) |
LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+ LS_64((((vsi->stats_fcn_id_alloc) &&
+ (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
+ I40IWQPC_USESTATSINSTANCE) |
LS_64(1, I40IWQPC_IWARPMODE) |
LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
@@ -2623,7 +2784,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
u64 *wqe;
struct i40iw_sc_cqp *cqp;
u64 header;
+ enum i40iw_page_size page_size;
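+	/* translate the page size in bytes to the HW page-size encoding (2M huge page or 4K) */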
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
cqp = dev->cqp;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
@@ -2643,7 +2806,7 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
LS_64(1, I40IW_CQPSQ_STAG_MR) |
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
@@ -2679,7 +2842,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
u32 pble_obj_cnt;
bool remote_access;
u8 addr_type;
+ enum i40iw_page_size page_size;
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
remote_access = true;
@@ -2722,7 +2887,7 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
LS_64(1, I40IW_CQPSQ_STAG_MR) |
LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
- LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
@@ -2937,7 +3102,9 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
u64 temp, header;
u64 *wqe;
u32 wqe_idx;
+ enum i40iw_page_size page_size;
+ page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
0, info->wr_id);
if (!wqe)
@@ -2964,7 +3131,7 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
- LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
+ LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
@@ -3959,7 +4126,7 @@ enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
struct cqp_commands_info *pcmdinfo)
{
enum i40iw_status_code status = 0;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
@@ -3978,7 +4145,7 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
{
enum i40iw_status_code status = 0;
struct cqp_commands_info *pcmdinfo;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
@@ -4055,7 +4222,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
u16 ddp_seg_len;
int copy_len = 0;
u8 is_tagged = 0;
- enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
u32 opcode;
struct i40iw_terminate_hdr *termhdr;
@@ -4228,9 +4394,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
if (copy_len)
memcpy(termhdr + 1, pkt, copy_len);
- if (flush_code && !info->in_rdrsp_wr)
- qp->sq_flush = (info->sq) ? true : false;
-
return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
@@ -4321,286 +4484,370 @@ void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *in
}
/**
- * i40iw_hw_stat_init - Initiliaze HW stats table
- * @devstat: pestat struct
+ * i40iw_sc_vsi_init - Initialize virtual device
+ * @vsi: pointer to the vsi structure
+ * @info: parameters to initialize vsi
+ */
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
+{
+ int i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->mss = info->params->mss;
+ i40iw_fill_qos_list(info->params->qs_handle_list);
+
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle =
+ info->params->qs_handle_list[i];
+ i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
+ spin_lock_init(&vsi->qos[i].lock);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+}
+
+/**
+ * i40iw_hw_stats_init - Initialize HW stats table
+ * @stats: pestat struct
* @fcn_idx: PCI fn id
- * @hw: PF i40iw_hw structure.
* @is_pf: Is it a PF?
*
- * Populate the HW stat table with register offset addr for each
- * stat. And start the perioidic stats timer.
+ * Populate the HW stats table with register offset addr for each
+ * stat and start the periodic stats timer.
*/
-static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
- u8 fcn_idx,
- struct i40iw_hw *hw, bool is_pf)
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
{
- u32 stat_reg_offset;
- u32 stat_index;
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
-
- devstat->hw = hw;
+ u32 stats_reg_offset;
+ u32 stats_index;
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
if (is_pf) {
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
I40E_GLPES_PFTCPRTXSEG(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
I40E_GLPES_PFRDMAVINVLO(fcn_idx);
} else {
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
I40E_GLPES_VFTCPRTXSEG(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
- stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
- stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
I40E_GLPES_VFRDMAVINVLO(fcn_idx);
}
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++) {
- stat_reg_offset = stat_table->stat_offset_64[stat_index];
- last_rd_stats->stat_value_64[stat_index] =
- readq(devstat->hw->hw_addr + stat_reg_offset);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++) {
+ stats_reg_offset = stats_table->stats_offset_64[stats_index];
+ last_rd_stats->stats_value_64[stats_index] =
+ readq(stats->hw->hw_addr + stats_reg_offset);
}
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++) {
- stat_reg_offset = stat_table->stat_offset_32[stat_index];
- last_rd_stats->stat_value_32[stat_index] =
- i40iw_rd32(devstat->hw, stat_reg_offset);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++) {
+ stats_reg_offset = stats_table->stats_offset_32[stats_index];
+ last_rd_stats->stats_value_32[stats_index] =
+ i40iw_rd32(stats->hw, stats_reg_offset);
}
}
/**
- * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters, accounting for roll-overs.
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
*/
-static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
- enum i40iw_hw_stat_index_32b index,
- u64 *value)
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_32b index,
+ u64 *value)
{
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- u64 new_stat_value = 0;
- u32 stat_reg_offset = stat_table->stat_offset_32[index];
-
- new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+ u64 new_stats_value = 0;
+ u32 stats_reg_offset = stats_table->stats_offset_32[index];
+
+ new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
/*roll-over case */
- if (new_stat_value < last_rd_stats->stat_value_32[index])
- hw_stats->stat_value_32[index] += new_stat_value;
+ if (new_stats_value < last_rd_stats->stats_value_32[index])
+ hw_stats->stats_value_32[index] += new_stats_value;
else
- hw_stats->stat_value_32[index] +=
- new_stat_value - last_rd_stats->stat_value_32[index];
- last_rd_stats->stat_value_32[index] = new_stat_value;
- *value = hw_stats->stat_value_32[index];
+ hw_stats->stats_value_32[index] +=
+ new_stats_value - last_rd_stats->stats_value_32[index];
+ last_rd_stats->stats_value_32[index] = new_stats_value;
+ *value = hw_stats->stats_value_32[index];
}
/**
- * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit), accounting for roll-overs.
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
*/
-static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
- enum i40iw_hw_stat_index_64b index,
- u64 *value)
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_64b index,
+ u64 *value)
{
- struct i40iw_dev_hw_stat_offsets *stat_table =
- &devstat->hw_stat_offsets;
- struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
- struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- u64 new_stat_value = 0;
- u32 stat_reg_offset = stat_table->stat_offset_64[index];
-
- new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+ u64 new_stats_value = 0;
+ u32 stats_reg_offset = stats_table->stats_offset_64[index];
+
+ new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
/*roll-over case */
- if (new_stat_value < last_rd_stats->stat_value_64[index])
- hw_stats->stat_value_64[index] += new_stat_value;
+ if (new_stats_value < last_rd_stats->stats_value_64[index])
+ hw_stats->stats_value_64[index] += new_stats_value;
else
- hw_stats->stat_value_64[index] +=
- new_stat_value - last_rd_stats->stat_value_64[index];
- last_rd_stats->stat_value_64[index] = new_stat_value;
- *value = hw_stats->stat_value_64[index];
+ hw_stats->stats_value_64[index] +=
+ new_stats_value - last_rd_stats->stats_value_64[index];
+ last_rd_stats->stats_value_64[index] = new_stats_value;
+ *value = hw_stats->stats_value_64[index];
}
/**
- * i40iw_hw_stat_read_all - read all HW stat counters
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_read_all - read all HW stat counters
+ * @stats: pestat struct
+ * @stats_values: hw stats structure
*
* Read all the HW stat counters and populates hw_stats structure
- * of passed-in dev's pestat as well as copy created in stat_values.
+ * of the passed-in vsi's pestat as well as the copy created in stats_values.
*/
-static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
- struct i40iw_dev_hw_stats *stat_values)
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
+ struct i40iw_dev_hw_stats *stats_values)
{
- u32 stat_index;
-
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++)
- i40iw_hw_stat_read_32(devstat, stat_index,
- &stat_values->stat_value_32[stat_index]);
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++)
- i40iw_hw_stat_read_64(devstat, stat_index,
- &stat_values->stat_value_64[stat_index]);
+ u32 stats_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++)
+ i40iw_hw_stats_read_32(stats, stats_index,
+ &stats_values->stats_value_32[stats_index]);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++)
+ i40iw_hw_stats_read_64(stats, stats_index,
+ &stats_values->stats_value_64[stats_index]);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/**
- * i40iw_hw_stat_refresh_all - Update all HW stat structs
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_refresh_all - Update all HW stats structs
+ * @stats: pestat struct
*
- * Read all the HW stat counters to refresh values in hw_stats structure
+ * Read all the HW stats counters to refresh values in hw_stats structure
* of passed-in dev's pestat
*/
-static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
+{
+ u64 stats_value;
+ u32 stats_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stats_index++)
+ i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
+ for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stats_index++)
+ i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
+ spin_unlock_irqrestore(&stats->lock, flags);
+}
+
+/**
+ * i40iw_get_fcn_id - Return the function id
+ * @dev: pointer to the device
+ */
+static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
+{
+ u8 fcn_id = I40IW_INVALID_FCN_ID;
+ u8 i;
+
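+	/* reserve the first unused non-PF stats instance */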
+ for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
+ if (!dev->fcn_id_array[i]) {
+ fcn_id = i;
+ dev->fcn_id_array[i] = true;
+ break;
+ }
+ return fcn_id;
+}
+
+/**
+ * i40iw_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
+{
+ u8 fcn_id = info->fcn_id;
+
+ if (info->alloc_fcn_id)
+ fcn_id = i40iw_get_fcn_id(vsi->dev);
+
+ if (fcn_id == I40IW_INVALID_FCN_ID)
+ return I40IW_ERR_NOT_READY;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+
+ if (info->stats_initialize) {
+ i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
+ spin_lock_init(&vsi->pestat->lock);
+ i40iw_hw_stats_start_timer(vsi);
+ }
+ vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
+ vsi->fcn_id = fcn_id;
+ return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
- u64 stat_value;
- u32 stat_index;
-
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
- stat_index++)
- i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
- for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
- stat_index++)
- i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+ u8 fcn_id = vsi->fcn_id;
+
+ if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+ vsi->dev->fcn_id_array[fcn_id] = false;
+ i40iw_hw_stats_stop_timer(vsi);
}
static struct i40iw_cqp_ops iw_cqp_ops = {
@@ -4711,24 +4958,6 @@ static struct i40iw_hmc_ops iw_hmc_ops = {
NULL
};
-static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
- i40iw_hw_stat_init,
- i40iw_hw_stat_read_32,
- i40iw_hw_stat_read_64,
- i40iw_hw_stat_read_all,
- i40iw_hw_stat_refresh_all
-};
-
-/**
- * i40iw_device_init_pestat - Initialize the pestat structure
- * @dev: pestat struct
- */
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
-{
- devstat->ops = iw_device_pestat_ops;
- return 0;
-}
-
/**
* i40iw_device_init - Initialize IWARP device
* @dev: IWARP device pointer
@@ -4750,14 +4979,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
dev->debug_mask = info->debug_mask;
- ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
- if (ret_code) {
- i40iw_debug(dev, I40IW_DEBUG_DEV,
- "%s: i40iw_device_init_pestat failed\n", __func__);
- return ret_code;
- }
dev->hmc_fn_id = info->hmc_fn_id;
- dev->qs_handle = info->qs_handle;
dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
@@ -4770,15 +4992,10 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
dev->hw = info->hw;
dev->hw->hw_addr = info->bar0;
- val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
- dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
-
if (dev->is_pf) {
- dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
- dev->hmc_fn_id, dev->hw, true);
- spin_lock_init(&dev->dev_pestat.stats_lock);
- /*start the periodic stats_timer */
- i40iw_hw_stats_start_timer(dev);
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+ dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
if ((db_size != I40IW_PE_DB_SIZE_4M) &&
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2fac1db0e0a0..a39ac12b6a7e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -35,6 +35,8 @@
#ifndef I40IW_D_H
#define I40IW_D_H
+#define I40IW_FIRST_USER_QP_ID 2
+
#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
@@ -67,6 +69,9 @@
#define I40IW_STAG_TYPE_NONSHARED 1
#define I40IW_MAX_USER_PRIORITY 8
+#define I40IW_MAX_STATS_COUNT 16
+#define I40IW_FIRST_NON_PF_STAT 4
+
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
@@ -74,6 +79,8 @@
#define RS_32_1(val, bits) (u32)(val >> bits)
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define QS_HANDLE_UNKNOWN 0xffff
+
#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
@@ -1199,8 +1206,11 @@
#define I40IWQPC_RXCQNUM_SHIFT 32
#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
-#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+#define I40IWQPC_STAT_INDEX_SHIFT 0
+#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT 0
+#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
#define I40IWQPC_LASTBYTESENT_SHIFT 0
#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
@@ -1232,11 +1242,8 @@
#define I40IWQPC_PRIVEN_SHIFT 25
#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
-#define I40IWQPC_LSMMPRESENT_SHIFT 26
-#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
-
-#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
-#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
+#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
#define I40IWQPC_IWARPMODE_SHIFT 28
#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
@@ -1713,6 +1720,8 @@ enum i40iw_alignment {
#define OP_MANAGE_VF_PBLE_BP 28
#define OP_QUERY_FPM_VALUES 29
#define OP_COMMIT_FPM_VALUES 30
-#define OP_SIZE_CQP_STAT_ARRAY 31
+#define OP_REQUESTED_COMMANDS 31
+#define OP_COMPLETED_COMMANDS 32
+#define OP_SIZE_CQP_STAT_ARRAY 33
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 0c92a40b3e86..476867a3f584 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -62,7 +62,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
iwdev->max_cqe = 0xFFFFF;
- num_pds = max_qp * 4;
+ num_pds = I40IW_MAX_PDS;
resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
@@ -308,7 +308,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwqp = iwdev->qp_table[info->qp_cq_id];
if (!iwqp) {
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+ i40iw_debug(dev, I40IW_DEBUG_AEQ,
+ "%s qp_id %d is already freed\n",
+ __func__, info->qp_cq_id);
continue;
}
i40iw_add_ref(&iwqp->ibqp);
@@ -359,6 +361,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
continue;
i40iw_cm_disconn(iwqp);
break;
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
+ break;
case I40IW_AE_TERMINATE_SENT:
i40iw_terminate_send_fin(qp);
break;
@@ -404,19 +409,18 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
- case I40IW_AE_QP_SUSPEND_COMPLETE:
ctx_info->err_rq_idx_valid = false;
default:
- if (!info->sq && ctx_info->err_rq_idx_valid) {
- ctx_info->err_rq_idx = info->wqe_idx;
- ctx_info->tcp_info_valid = false;
- ctx_info->iwarp_info_valid = false;
- ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
- iwqp->host_ctx.va,
- ctx_info);
- }
- i40iw_terminate_connection(qp, info);
- break;
+ if (!info->sq && ctx_info->err_rq_idx_valid) {
+ ctx_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ iwqp->host_ctx.va,
+ ctx_info);
+ }
+ i40iw_terminate_connection(qp, info);
+ break;
}
if (info->qp)
i40iw_rem_ref(&iwqp->ibqp);
@@ -538,6 +542,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
{
struct i40iw_qhash_table_info *info;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_sc_vsi *vsi = &iwdev->vsi;
enum i40iw_status_code status;
struct i40iw_cqp *iwcqp = &iwdev->cqp;
struct i40iw_cqp_request *cqp_request;
@@ -550,6 +555,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
info = &cqp_info->in.u.manage_qhash_table_entry.info;
memset(info, 0, sizeof(*info));
+ info->vsi = &iwdev->vsi;
info->manage = mtype;
info->entry_type = etype;
if (cminfo->vlan_id != 0xFFFF) {
@@ -560,8 +566,9 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
}
info->ipv4_valid = cminfo->ipv4;
+ info->user_pri = cminfo->user_pri;
ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
- info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+ info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
info->dest_port = cpu_to_le16(cminfo->loc_port);
info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
@@ -617,6 +624,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_qp_flush_info *hw_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
@@ -631,9 +639,30 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
cqp_info->in.u.qp_flush_wqes.qp = qp;
cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
+ if (status) {
i40iw_pr_err("CQP-OP Flush WQE's fail");
- return status;
+ complete(&iwqp->sq_drained);
+ complete(&iwqp->rq_drained);
+ return status;
+ }
+ if (!cqp_request->compl_info.maj_err_code) {
+ switch (cqp_request->compl_info.min_err_code) {
+ case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
+ complete(&iwqp->sq_drained);
+ break;
+ case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
+ complete(&iwqp->rq_drained);
+ break;
+ case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
+ break;
+ default:
+ complete(&iwqp->sq_drained);
+ complete(&iwqp->rq_drained);
+ break;
+ }
+ }
+
+ return 0;
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index ac2f3cd9478c..2728af3103ce 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -237,14 +237,11 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
*/
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
- enum i40iw_status_code status = 0;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_cqp *cqp = &iwdev->cqp;
- if (free_hwcqp && dev->cqp_ops->cqp_destroy)
- status = dev->cqp_ops->cqp_destroy(dev->cqp);
- if (status)
- i40iw_pr_err("destroy cqp failed");
+ if (free_hwcqp)
+ dev->cqp_ops->cqp_destroy(dev->cqp);
i40iw_free_dma_mem(dev->hw, &cqp->sq);
kfree(cqp->scratch_array);
@@ -270,6 +267,7 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
else
i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+ irq_set_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
}
@@ -603,7 +601,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
- status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+ status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
if (status) {
i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
@@ -688,6 +686,7 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
struct i40iw_msix_vector *msix_vec)
{
enum i40iw_status_code status;
+ cpumask_t mask;
if (iwdev->msix_shared && !ceq_id) {
tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,12 +696,15 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
}
+ cpumask_clear(&mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
+ irq_set_affinity_hint(msix_vec->irq, &mask);
+
if (status) {
i40iw_pr_err("ceq irq config fail\n");
return I40IW_ERR_CONFIG;
}
msix_vec->ceq_id = ceq_id;
- msix_vec->cpu_affinity = 0;
return 0;
}
@@ -930,6 +932,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
struct i40iw_puda_rsrc_info info;
enum i40iw_status_code status;
+ memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
info.cq_id = 1;
info.qp_id = 0;
@@ -939,10 +942,9 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
info.rq_size = 8192;
info.buf_size = 1024;
info.tx_buf_cnt = 16384;
- info.mss = iwdev->mss;
info.receive = i40iw_receive_ilq;
info.xmit_complete = i40iw_free_sqbuf;
- status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ilq create fail\n");
return status;
@@ -959,6 +961,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
struct i40iw_puda_rsrc_info info;
enum i40iw_status_code status;
+ memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
info.qp_id = iwdev->sc_dev.exception_lan_queue;
@@ -967,9 +970,8 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
info.sq_size = 8192;
info.rq_size = 8192;
info.buf_size = 2048;
- info.mss = iwdev->mss;
info.tx_buf_cnt = 16384;
- status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ieq create fail\n");
return status;
@@ -1159,7 +1161,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
struct net_device *ip_dev;
struct inet6_dev *idev;
- struct inet6_ifaddr *ifp;
+ struct inet6_ifaddr *ifp, *tmp;
u32 local_ipaddr6[4];
rcu_read_lock();
@@ -1172,7 +1174,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
i40iw_pr_err("ipv6 inet device not found\n");
break;
}
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
i40iw_copy_ip_ntohl(local_ipaddr6,
@@ -1294,17 +1296,23 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
enum i40iw_status_code status;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_device_init_info info;
+ struct i40iw_vsi_init_info vsi_info;
struct i40iw_dma_mem mem;
+ struct i40iw_l2params l2params;
u32 size;
+ struct i40iw_vsi_stats_info stats_info;
+ u16 last_qset = I40IW_NO_QSET;
+ u16 qset;
+ u32 i;
+ memset(&l2params, 0, sizeof(l2params));
memset(&info, 0, sizeof(info));
size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
- if (!iwdev->hmc_info_mem) {
- i40iw_pr_err("memory alloc fail\n");
+ if (!iwdev->hmc_info_mem)
return I40IW_ERR_NO_MEMORY;
- }
+
iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
dev->hmc_info = &iwdev->hw.hmc;
dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
@@ -1325,7 +1333,17 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
info.bar0 = ldev->hw_addr;
info.hw = &iwdev->hw;
info.debug_mask = debug;
- info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+ l2params.mss =
+ (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+ qset = ldev->params.qos.prio_qos[i].qs_handle;
+ l2params.qs_handle_list[i] = qset;
+ if (last_qset == I40IW_NO_QSET)
+ last_qset = qset;
+ else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
+ iwdev->dcb = true;
+ }
+ i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1334,6 +1352,20 @@ exit:
kfree(iwdev->hmc_info_mem);
iwdev->hmc_info_mem = NULL;
}
+ memset(&vsi_info, 0, sizeof(vsi_info));
+ vsi_info.dev = &iwdev->sc_dev;
+ vsi_info.back_vsi = (void *)iwdev;
+ vsi_info.params = &l2params;
+ i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+ if (dev->is_pf) {
+ memset(&stats_info, 0, sizeof(stats_info));
+ stats_info.fcn_id = ldev->fid;
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ stats_info.stats_initialize = true;
+ if (stats_info.pestat)
+ i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
+ }
return status;
}
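The initialize_dev changes above build an i40iw_l2params from the LAN parameters: an MSS derived from the MTU, one qs_handle per user priority, and a DCB flag that is set as soon as two enabled traffic classes report different queue sets; the l2params then seed i40iw_sc_vsi_init(), and on the PF a pestat is allocated for i40iw_vsi_stats_init(). A stripped-down sketch of just the DCB detection loop, with illustrative macros standing in for I40IW_NO_QSET and I40E_CLIENT_MAX_USER_PRIORITY:

#include <linux/types.h>

#define EXAMPLE_NO_QSET  0xffff	/* stands in for I40IW_NO_QSET */
#define EXAMPLE_MAX_TCS  8	/* stands in for I40E_CLIENT_MAX_USER_PRIORITY */

/* True when the qs_handle array spans more than one queue set, i.e. DCB. */
static bool example_detect_dcb(const u16 *qs_handle_list)
{
	u16 last_qset = EXAMPLE_NO_QSET;
	u16 qset;
	int i;

	for (i = 0; i < EXAMPLE_MAX_TCS; i++) {
		qset = qs_handle_list[i];
		if (qset == EXAMPLE_NO_QSET)
			continue;		/* traffic class not enabled */
		if (last_qset == EXAMPLE_NO_QSET)
			last_qset = qset;	/* remember the first valid handle */
		else if (qset != last_qset)
			return true;		/* two distinct queue sets => DCB */
	}
	return false;
}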
@@ -1384,6 +1416,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+ iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
if (i == 0) {
iw_qvinfo->aeq_idx = 0;
if (iwdev->msix_shared)
@@ -1404,18 +1437,19 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
* i40iw_deinit_device - clean up the device resources
* @iwdev: iwarp device
* @reset: true if called before reset
- * @del_hdl: true if delete hdl entry
*
* Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
* destroy the device queues and free the pble and the hmc objects
*/
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
{
struct i40e_info *ldev = iwdev->ldev;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
i40iw_pr_info("state = %d\n", iwdev->init_state);
+ if (iwdev->param_wq)
+ destroy_workqueue(iwdev->param_wq);
switch (iwdev->init_state) {
case RDMA_DEV_REGISTERED:
@@ -1441,10 +1475,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_destroy_aeq(iwdev, reset);
/* fallthrough */
case IEQ_CREATED:
- i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
/* fallthrough */
case ILQ_CREATED:
- i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
/* fallthrough */
case CCQ_CREATED:
i40iw_destroy_ccq(iwdev, reset);
@@ -1456,13 +1490,14 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
/* fallthrough */
case CQP_CREATED:
- i40iw_destroy_cqp(iwdev, !reset);
+ i40iw_destroy_cqp(iwdev, true);
/* fallthrough */
case INITIAL_STATE:
i40iw_cleanup_cm_core(&iwdev->cm_core);
- if (dev->is_pf)
- i40iw_hw_stats_del_timer(dev);
-
+ if (iwdev->vsi.pestat) {
+ i40iw_vsi_stats_free(&iwdev->vsi);
+ kfree(iwdev->vsi.pestat);
+ }
i40iw_del_init_mem(iwdev);
break;
case INVALID_STATE:
@@ -1472,8 +1507,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
break;
}
- if (del_hdl)
- i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+ i40iw_del_handler(i40iw_find_i40e_handler(ldev));
kfree(iwdev->hdl);
}
@@ -1508,7 +1542,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
iwdev->netdev = ldev->netdev;
hdl->client = client;
- iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
if (!ldev->ftype)
iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
else
@@ -1528,6 +1561,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
init_waitqueue_head(&iwdev->vchnl_waitq);
init_waitqueue_head(&dev->vf_reqs);
+ init_waitqueue_head(&iwdev->close_wq);
status = i40iw_initialize_dev(iwdev, ldev);
exit:
@@ -1540,6 +1574,20 @@ exit:
}
/**
+ * i40iw_get_used_rsrc - determine resources used internally
+ * @iwdev: iwarp device
+ *
+ * Called after internal allocations
+ */
+static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
+{
+ iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
+ iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
+ iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
+ iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
+}
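i40iw_get_used_rsrc() leans on the fact that, right after i40iw_initialize_hw_resources(), the driver's own PDs/QPs/CQs/MRs occupy the lowest bits of each allocation bitmap, so the index of the first clear bit equals the count already consumed (those counts later shrink the limits reported by i40iw_query_device() in this series). A one-line sketch of the idiom, with an illustrative bitmap name:

#include <linux/bitops.h>

/*
 * Sketch: with allocations packed from bit 0, the index of the first zero
 * bit is the number of entries already handed out.  Only valid while no
 * holes have been punched into the bitmap.
 */
static unsigned long example_count_used(const unsigned long *alloc_bitmap,
					unsigned long max_entries)
{
	return find_next_zero_bit(alloc_bitmap, max_entries, 0);
}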
+
+/**
* i40iw_open - client interface operation open for iwarp/uda device
* @ldev: lan device information
* @client: iwarp client information, provided during registration
@@ -1611,6 +1659,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_initialize_hw_resources(iwdev);
if (status)
break;
+ i40iw_get_used_rsrc(iwdev);
dev->ccq_ops->ccq_arm(dev->ccq);
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
@@ -1630,35 +1679,73 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
iwdev->init_state = RDMA_DEV_REGISTERED;
iwdev->iw_status = 1;
i40iw_port_ibevent(iwdev);
+ iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
+ if (!iwdev->param_wq)
+ break;
i40iw_pr_info("i40iw_open completed\n");
return 0;
} while (0);
i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev, false, false);
+ i40iw_deinit_device(iwdev, false);
return -ERESTART;
}
/**
- * i40iw_l2param_change : handle qs handles for qos and mss change
+ * i40iw_l2params_worker - worker for l2 params change
+ * @work: work pointer for l2 params
+ */
+static void i40iw_l2params_worker(struct work_struct *work)
+{
+ struct l2params_work *dwork =
+ container_of(work, struct l2params_work, work);
+ struct i40iw_device *iwdev = dwork->iwdev;
+
+ i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
+ atomic_dec(&iwdev->params_busy);
+ kfree(work);
+}
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
* @ldev: lan device information
- * @client: client for paramater change
+ * @client: client for parameter change
* @params: new parameters from L2
*/
-static void i40iw_l2param_change(struct i40e_info *ldev,
- struct i40e_client *client,
+static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
struct i40e_params *params)
{
struct i40iw_handler *hdl;
+ struct i40iw_l2params *l2params;
+ struct l2params_work *work;
struct i40iw_device *iwdev;
+ int i;
hdl = i40iw_find_i40e_handler(ldev);
if (!hdl)
return;
iwdev = &hdl->device;
- if (params->mtu)
- iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+
+ if (atomic_read(&iwdev->params_busy))
+ return;
+
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ atomic_inc(&iwdev->params_busy);
+
+ work->iwdev = iwdev;
+ l2params = &work->l2params;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
+ l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
+
+ l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+
+ INIT_WORK(&work->work, i40iw_l2params_worker);
+ queue_work(iwdev->param_wq, &work->work);
}
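i40iw_l2param_change() is called from the LAN driver's notification path, so the new code snapshots the parameters into a heap-allocated work item and defers i40iw_change_l2params() to the ordered "l2params" workqueue, while params_busy drops overlapping notifications instead of queueing them. A generic sketch of that snapshot-and-defer shape; the struct and field names here are illustrative, not the driver's:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/atomic.h>

struct example_params { int mtu; };

struct example_dev {
	struct workqueue_struct *wq;	/* e.g. alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM) */
	atomic_t busy;
};

struct example_work {
	struct work_struct work;
	struct example_dev *dev;
	struct example_params snap;	/* copy taken in notifier context */
};

static void example_worker(struct work_struct *work)
{
	struct example_work *w = container_of(work, struct example_work, work);

	/* ... apply w->snap; sleeping is allowed here ... */
	atomic_dec(&w->dev->busy);
	kfree(w);			/* the work item owns itself */
}

/* Notifier side: may not sleep, so only snapshot and queue. */
static void example_param_change(struct example_dev *dev,
				 const struct example_params *p)
{
	struct example_work *w;

	if (atomic_read(&dev->busy))
		return;			/* an update is already in flight */

	w = kzalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	atomic_inc(&dev->busy);
	w->dev = dev;
	w->snap = *p;
	INIT_WORK(&w->work, example_worker);
	queue_work(dev->wq, &w->work);
}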
/**
@@ -1679,8 +1766,11 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
return;
iwdev = &hdl->device;
+ iwdev->closing = true;
+
+ i40iw_cm_disconnect_all(iwdev);
destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev, reset, true);
+ i40iw_deinit_device(iwdev, reset);
}
/**
@@ -1701,21 +1791,23 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
struct i40iw_vfdev *tmp_vfdev;
unsigned int i;
unsigned long flags;
+ struct i40iw_device *iwdev;
hdl = i40iw_find_i40e_handler(ldev);
if (!hdl)
return;
dev = &hdl->device.sc_dev;
+ iwdev = (struct i40iw_device *)dev->back_dev;
for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
continue;
/* free all resources allocated on behalf of vf */
tmp_vfdev = dev->vf_dev[i];
- spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
dev->vf_dev[i] = NULL;
- spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
/* remove vf hmc function */
memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index 80f422bf3967..aa66c1c63dfa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
@@ -207,9 +209,9 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait);
-struct i40iw_dev_pestat;
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+struct i40iw_sc_vsi;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() mmiowb()
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index a0b8ca10d67e..28a92fee0822 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
struct i40iw_device_init_info *info);
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
-
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
@@ -64,7 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
u32 *vf_cnt_array);
-/* cqp misc functions */
+/* stats functions */
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_32b index,
+ u64 *value);
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+ enum i40iw_hw_stats_index_64b index,
+ u64 *value);
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
+
+/* vsi misc functions */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
+
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 85993dc44f6e..c87ba1617087 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
- if (!pages) {
- ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
- goto error;
- }
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c62d354f7810..db41ab40da9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -42,12 +42,13 @@
#include "i40iw_p.h"
#include "i40iw_puda.h"
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_buf *buf);
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -292,7 +293,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
unsigned long flags;
if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
- rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+ rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
} else {
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
return I40IW_ERR_BAD_PTR;
@@ -335,7 +336,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
rsrc->stats_pkt_rcvd++;
rsrc->compl_rxwqe_idx = info.wqe_idx;
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
- rsrc->receive(rsrc->dev, buf);
+ rsrc->receive(rsrc->vsi, buf);
if (cq_type == I40IW_CQ_TYPE_ILQ)
i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
else
@@ -345,12 +346,12 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
- rsrc->xmit_complete(rsrc->dev, sqwrid);
+ rsrc->xmit_complete(rsrc->vsi, sqwrid);
spin_lock_irqsave(&rsrc->bufpool_lock, flags);
rsrc->tx_wqe_avail_cnt++;
spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
- if (!list_empty(&dev->ilq->txpend))
- i40iw_puda_send_buf(dev->ilq, NULL);
+ if (!list_empty(&rsrc->vsi->ilq->txpend))
+ i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
}
done:
@@ -513,10 +514,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
* i40iw_puda_qp_wqe - setup wqe for qp create
* @rsrc: resource for qp
*/
-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
- struct i40iw_sc_qp *qp = &rsrc->qp;
- struct i40iw_sc_dev *dev = rsrc->dev;
struct i40iw_sc_cqp *cqp;
u64 *wqe;
u64 header;
@@ -582,6 +581,7 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
qp->back_qp = (void *)rsrc;
qp->sq_pa = mem->pa;
qp->rq_pa = qp->sq_pa + sq_size;
+ qp->vsi = rsrc->vsi;
ukqp->sq_base = mem->va;
ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
@@ -608,15 +608,63 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
I40E_VFPE_WQEALLOC1);
- qp->qs_handle = qp->dev->qs_handle;
+ qp->user_pri = 0;
+ i40iw_qp_add_qos(qp);
i40iw_puda_qp_setctx(rsrc);
- ret = i40iw_puda_qp_wqe(rsrc);
+ if (rsrc->ceq_valid)
+ ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
+ else
+ ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
if (ret)
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
return ret;
}
/**
+ * i40iw_puda_cq_wqe - setup wqe for cq create
+ * @rsrc: resource for cq
+ */
+static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ struct i40iw_ccq_cqe_info compl_info;
+ enum i40iw_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(cq->shadow_read_threshold,
+ I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+ header = cq->cq_uk.cq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(dev->cqp);
+ status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_CQ,
+ &compl_info);
+ return status;
+}
+
+/**
* i40iw_puda_cq_create - create cq for resource
* @rsrc: resource for which cq to create
*/
@@ -624,18 +672,13 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
struct i40iw_sc_dev *dev = rsrc->dev;
struct i40iw_sc_cq *cq = &rsrc->cq;
- u64 *wqe;
- struct i40iw_sc_cqp *cqp;
- u64 header;
enum i40iw_status_code ret = 0;
u32 tsize, cqsize;
- u32 shadow_read_threshold = 128;
struct i40iw_dma_mem *mem;
- struct i40iw_ccq_cqe_info compl_info;
struct i40iw_cq_init_info info;
struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
- cq->back_cq = (void *)rsrc;
+ cq->vsi = rsrc->vsi;
cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
@@ -656,43 +699,84 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
init_info->cq_size = rsrc->cq_size;
init_info->cq_id = rsrc->cq_id;
+ info.ceqe_mask = true;
+ info.ceq_id_valid = true;
ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
if (ret)
goto error;
- cqp = dev->cqp;
- wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
- if (!wqe) {
- ret = I40IW_ERR_RING_FULL;
- goto error;
- }
+ if (rsrc->ceq_valid)
+ ret = i40iw_cqp_cq_create_cmd(dev, cq);
+ else
+ ret = i40iw_puda_cq_wqe(dev, cq);
+error:
+ if (ret)
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ return ret;
+}
- set_64bit_val(wqe, 0, rsrc->cq_size);
- set_64bit_val(wqe, 8, RS_64_1(cq, 1));
- set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
- set_64bit_val(wqe, 32, cq->cq_pa);
+/**
+ * i40iw_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
+{
+ enum i40iw_status_code ret;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = rsrc->dev;
- set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ if (rsrc->ceq_valid) {
+ i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+ return;
+ }
- header = rsrc->cq_id |
- LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
- LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
- LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
- LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
- LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+ 0, false, true, true);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error puda qp destroy wqe\n",
+ __func__);
- i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
- wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (!ret) {
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error puda qp destroy failed\n",
+ __func__);
+ }
+}
- i40iw_sc_cqp_post_sq(dev->cqp);
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_CREATE_CQ,
- &compl_info);
+/**
+ * i40iw_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
+{
+ enum i40iw_status_code ret;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->ceq_valid) {
+ i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+ return;
+ }
+ ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
-error:
if (ret)
- i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
- return ret;
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error ieq cq destroy\n",
+ __func__);
+
+ if (!ret) {
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ }
}
/**
@@ -701,25 +785,24 @@ error:
- * @type: type of resource to dele
+ * @type: type of resource to delete
* @reset: true if reset chip
*/
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
enum puda_resource_type type,
bool reset)
{
- struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_sc_dev *dev = vsi->dev;
struct i40iw_puda_rsrc *rsrc;
struct i40iw_puda_buf *buf = NULL;
struct i40iw_puda_buf *nextbuf = NULL;
struct i40iw_virt_mem *vmem;
- enum i40iw_status_code ret;
switch (type) {
case I40IW_PUDA_RSRC_TYPE_ILQ:
- rsrc = dev->ilq;
- vmem = &dev->ilq_mem;
+ rsrc = vsi->ilq;
+ vmem = &vsi->ilq_mem;
break;
case I40IW_PUDA_RSRC_TYPE_IEQ:
- rsrc = dev->ieq;
- vmem = &dev->ieq_mem;
+ rsrc = vsi->ieq;
+ vmem = &vsi->ieq_mem;
break;
default:
i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
@@ -731,45 +814,14 @@ void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
case PUDA_QP_CREATED:
- do {
- if (reset)
- break;
- ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
- 0, false, true, true);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy\n",
- __func__);
-
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_QP,
- &compl_info);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy done\n",
- __func__);
- } while (0);
+ if (!reset)
+ i40iw_puda_free_qp(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
/* fallthrough */
case PUDA_CQ_CREATED:
- do {
- if (reset)
- break;
- ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq cq destroy\n",
- __func__);
-
- ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
- I40IW_CQP_OP_DESTROY_CQ,
- &compl_info);
- if (ret)
- i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
- "%s error ieq qp destroy done\n",
- __func__);
- } while (0);
+ if (!reset)
+ i40iw_puda_free_cq(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
break;
@@ -825,9 +877,10 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
* @info: resource information
*/
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_rsrc_info *info)
{
+ struct i40iw_sc_dev *dev = vsi->dev;
enum i40iw_status_code ret = 0;
struct i40iw_puda_rsrc *rsrc;
u32 pudasize;
@@ -840,10 +893,10 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rqwridsize = info->rq_size * 8;
switch (info->type) {
case I40IW_PUDA_RSRC_TYPE_ILQ:
- vmem = &dev->ilq_mem;
+ vmem = &vsi->ilq_mem;
break;
case I40IW_PUDA_RSRC_TYPE_IEQ:
- vmem = &dev->ieq_mem;
+ vmem = &vsi->ieq_mem;
break;
default:
return I40IW_NOT_SUPPORTED;
@@ -856,28 +909,28 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rsrc = (struct i40iw_puda_rsrc *)vmem->va;
spin_lock_init(&rsrc->bufpool_lock);
if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
- dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
- dev->ilq_count = info->count;
+ vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+ vsi->ilq_count = info->count;
rsrc->receive = info->receive;
rsrc->xmit_complete = info->xmit_complete;
} else {
- vmem = &dev->ieq_mem;
- dev->ieq_count = info->count;
- dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+ vmem = &vsi->ieq_mem;
+ vsi->ieq_count = info->count;
+ vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
rsrc->receive = i40iw_ieq_receive;
rsrc->xmit_complete = i40iw_ieq_tx_compl;
}
+ rsrc->ceq_valid = info->ceq_valid;
rsrc->type = info->type;
rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
- rsrc->mss = info->mss;
/* Initialize all ieq lists */
INIT_LIST_HEAD(&rsrc->bufpool);
INIT_LIST_HEAD(&rsrc->txpend);
rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
- dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+ dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
rsrc->qp_id = info->qp_id;
rsrc->cq_id = info->cq_id;
rsrc->sq_size = info->sq_size;
@@ -885,6 +938,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
rsrc->cq_size = info->rq_size + info->sq_size;
rsrc->buf_size = info->buf_size;
rsrc->dev = dev;
+ rsrc->vsi = vsi;
ret = i40iw_puda_cq_create(rsrc);
if (!ret) {
@@ -919,7 +973,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
dev->ccq_ops->ccq_arm(&rsrc->cq);
return ret;
error:
- i40iw_puda_dele_resources(dev, info->type, false);
+ i40iw_puda_dele_resources(vsi, info->type, false);
return ret;
}
@@ -1131,7 +1185,7 @@ static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *i
list_add(&buf->list, &pbufl);
status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
- if (!status)
+ if (status)
goto error;
txbuf = i40iw_puda_get_bufpool(ieq);
@@ -1332,7 +1386,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
}
if (pfpdu->mode && (fps != pfpdu->fps)) {
/* clean up qp as it is new partial sequence */
- i40iw_ieq_cleanup_qp(ieq->dev, qp);
+ i40iw_ieq_cleanup_qp(ieq, qp);
i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
"%s: restarting new partial\n", __func__);
pfpdu->mode = false;
@@ -1344,7 +1398,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
pfpdu->rcv_nxt = fps;
pfpdu->fps = fps;
pfpdu->mode = true;
- pfpdu->max_fpdu_data = ieq->mss;
+ pfpdu->max_fpdu_data = ieq->vsi->mss;
pfpdu->pmode_count++;
INIT_LIST_HEAD(rxlist);
i40iw_ieq_check_first_buf(buf, fps);
@@ -1379,14 +1433,14 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
* @buf: exception buffer received
*/
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_buf *buf)
{
- struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_rsrc *ieq = vsi->ieq;
struct i40iw_sc_qp *qp = NULL;
u32 wqe_idx = ieq->compl_rxwqe_idx;
- qp = i40iw_ieq_get_qp(dev, buf);
+ qp = i40iw_ieq_get_qp(vsi->dev, buf);
if (!qp) {
ieq->stats_bad_qp_id++;
i40iw_puda_ret_bufpool(ieq, buf);
@@ -1404,12 +1458,12 @@ static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
/**
* i40iw_ieq_tx_compl - put back after sending completed exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
* @sqwrid: pointer to puda buffer
*/
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
{
- struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_rsrc *ieq = vsi->ieq;
struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
i40iw_puda_ret_bufpool(ieq, buf);
@@ -1421,15 +1475,14 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
/**
* i40iw_ieq_cleanup_qp - qp is being destroyed
- * @dev: iwarp device
+ * @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
struct list_head *rxlist = &pfpdu->rxlist;
- struct i40iw_puda_rsrc *ieq = dev->ieq;
if (!pfpdu->mode)
return;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 52bf7826ce4e..dba05ce7d392 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,6 +100,7 @@ struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
+ bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -107,8 +108,8 @@ struct i40iw_puda_rsrc_info {
u16 buf_size;
u16 mss;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
- void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
};
struct i40iw_puda_rsrc {
@@ -116,6 +117,7 @@ struct i40iw_puda_rsrc {
struct i40iw_sc_qp qp;
struct i40iw_sc_pd sc_pd;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
struct i40iw_dma_mem cqmem;
struct i40iw_dma_mem qpmem;
struct i40iw_virt_mem ilq_mem;
@@ -123,6 +125,7 @@ struct i40iw_puda_rsrc {
enum puda_resource_type type;
u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
u16 mss;
+ bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -142,8 +145,8 @@ struct i40iw_puda_rsrc {
u32 avail_buf_count; /* snapshot of currently available buffers */
spinlock_t bufpool_lock;
struct i40iw_puda_buf *alloclist;
- void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
- void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
/* puda stats */
u64 stats_buf_alloc_fail;
u64 stats_pkt_rcvd;
@@ -160,14 +163,13 @@ void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
struct i40iw_puda_send_info *info);
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
struct i40iw_puda_rsrc_info *info);
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
enum puda_resource_type type,
bool reset);
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
struct i40iw_sc_cq *cq, u32 *compl_err);
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
struct i40iw_puda_buf *buf);
@@ -180,4 +182,8 @@ void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_free_hash_desc(struct shash_desc *desc);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
u32 seqnum);
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 2b1a04e9ca3c..7b76259752b0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -61,7 +61,7 @@ struct i40iw_cq_shadow_area {
struct i40iw_sc_dev;
struct i40iw_hmc_info;
-struct i40iw_dev_pestat;
+struct i40iw_vsi_pestat;
struct i40iw_cqp_ops;
struct i40iw_ccq_ops;
@@ -74,6 +74,11 @@ struct i40iw_priv_qp_ops;
struct i40iw_priv_cq_ops;
struct i40iw_hmc_ops;
+enum i40iw_page_size {
+ I40IW_PAGE_SIZE_4K,
+ I40IW_PAGE_SIZE_2M
+};
+
enum i40iw_resource_indicator_type {
I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
I40IW_RSRC_INDICATOR_TYPE_CQ,
@@ -186,7 +191,7 @@ enum i40iw_debug_flag {
I40IW_DEBUG_ALL = 0xFFFFFFFF
};
-enum i40iw_hw_stat_index_32b {
+enum i40iw_hw_stats_index_32b {
I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
I40IW_HW_STAT_INDEX_IP4RXTRUNC,
I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
@@ -199,7 +204,7 @@ enum i40iw_hw_stat_index_32b {
I40IW_HW_STAT_INDEX_MAX_32
};
-enum i40iw_hw_stat_index_64b {
+enum i40iw_hw_stats_index_64b {
I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
I40IW_HW_STAT_INDEX_IP4RXPKTS,
I40IW_HW_STAT_INDEX_IP4RXFRAGS,
@@ -229,32 +234,23 @@ enum i40iw_hw_stat_index_64b {
I40IW_HW_STAT_INDEX_MAX_64
};
-struct i40iw_dev_hw_stat_offsets {
- u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
- u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+struct i40iw_dev_hw_stats_offsets {
+ u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
};
struct i40iw_dev_hw_stats {
- u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
- u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
-};
-
-struct i40iw_device_pestat_ops {
- void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
- void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
- void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
- void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
- void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+ u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
};
-struct i40iw_dev_pestat {
+struct i40iw_vsi_pestat {
struct i40iw_hw *hw;
- struct i40iw_device_pestat_ops ops;
struct i40iw_dev_hw_stats hw_stats;
struct i40iw_dev_hw_stats last_read_hw_stats;
- struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+ struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
struct timer_list stats_timer;
- spinlock_t stats_lock; /* rdma stats lock */
+ spinlock_t lock; /* rdma stats lock */
};
struct i40iw_hw {
@@ -284,6 +280,7 @@ struct i40iw_sc_pd {
u32 size;
struct i40iw_sc_dev *dev;
u16 pd_id;
+ int abi_ver;
};
struct i40iw_cqp_quanta {
@@ -350,6 +347,7 @@ struct i40iw_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
u32 ceq_id;
@@ -373,6 +371,7 @@ struct i40iw_sc_qp {
u64 shadow_area_pa;
u64 q2_pa;
struct i40iw_sc_dev *dev;
+ struct i40iw_sc_vsi *vsi;
struct i40iw_sc_pd *pd;
u64 *hw_host_ctx;
void *llp_stream_handle;
@@ -397,6 +396,9 @@ struct i40iw_sc_qp {
bool virtual_map;
bool flush_sq;
bool flush_rq;
+ u8 user_pri;
+ struct list_head list;
+ bool on_qoslist;
bool sq_flush;
enum i40iw_flush_opcode flush_code;
enum i40iw_term_eventtypes eventtype;
@@ -424,10 +426,16 @@ struct i40iw_vchnl_vf_msg_buffer {
char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
};
+struct i40iw_qos {
+ struct list_head qplist;
+ spinlock_t lock; /* qos list */
+ u16 qs_handle;
+};
+
struct i40iw_vfdev {
struct i40iw_sc_dev *pf_dev;
u8 *hmc_info_mem;
- struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_vsi_pestat pestat;
struct i40iw_hmc_pble_info *pble_info;
struct i40iw_hmc_info hmc_info;
struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
@@ -441,11 +449,28 @@ struct i40iw_vfdev {
bool stats_initialized;
};
+#define I40IW_INVALID_FCN_ID 0xff
+struct i40iw_sc_vsi {
+ struct i40iw_sc_dev *dev;
+ void *back_vsi; /* Owned by OS */
+ u32 ilq_count;
+ struct i40iw_virt_mem ilq_mem;
+ struct i40iw_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct i40iw_virt_mem ieq_mem;
+ struct i40iw_puda_rsrc *ieq;
+ u16 mss;
+ u8 fcn_id;
+ bool stats_fcn_id_alloc;
+ struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
+ struct i40iw_vsi_pestat *pestat;
+};
+
struct i40iw_sc_dev {
struct list_head cqp_cmd_head; /* head of the CQP command list */
spinlock_t cqp_lock; /* cqp list sync */
struct i40iw_dev_uk dev_uk;
- struct i40iw_dev_pestat dev_pestat;
+ bool fcn_id_array[I40IW_MAX_STATS_COUNT];
struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -472,17 +497,9 @@ struct i40iw_sc_dev {
struct i40iw_cqp_misc_ops *cqp_misc_ops;
struct i40iw_hmc_ops *hmc_ops;
struct i40iw_vchnl_if vchnl_if;
- u32 ilq_count;
- struct i40iw_virt_mem ilq_mem;
- struct i40iw_puda_rsrc *ilq;
- u32 ieq_count;
- struct i40iw_virt_mem ieq_mem;
- struct i40iw_puda_rsrc *ieq;
-
const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
- u16 qs_handle;
u32 debug_mask;
u16 exception_lan_queue;
u8 hmc_fn_id;
@@ -556,6 +573,19 @@ struct i40iw_l2params {
u16 mss;
};
+struct i40iw_vsi_init_info {
+ struct i40iw_sc_dev *dev;
+ void *back_vsi;
+ struct i40iw_l2params *params;
+};
+
+struct i40iw_vsi_stats_info {
+ struct i40iw_vsi_pestat *pestat;
+ u8 fcn_id;
+ bool alloc_fcn_id;
+ bool stats_initialize;
+};
+
struct i40iw_device_init_info {
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@@ -564,7 +594,6 @@ struct i40iw_device_init_info {
struct i40iw_hw *hw;
void __iomem *bar0;
enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u16 qs_handle;
u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
@@ -722,6 +751,8 @@ struct i40iw_qp_host_ctx_info {
bool iwarp_info_valid;
bool err_rq_idx_valid;
u16 err_rq_idx;
+ bool add_to_qoslist;
+ u8 user_pri;
};
struct i40iw_aeqe_info {
@@ -814,6 +845,7 @@ struct i40iw_register_shared_stag {
struct i40iw_qp_init_info {
struct i40iw_qp_uk_init_info qp_uk_init_info;
struct i40iw_sc_pd *pd;
+ struct i40iw_sc_vsi *vsi;
u64 *host_ctx;
u8 *q2;
u64 sq_pa;
@@ -821,6 +853,7 @@ struct i40iw_qp_init_info {
u64 host_ctx_pa;
u64 q2_pa;
u64 shadow_area_pa;
+ int abi_ver;
u8 sq_tph_val;
u8 rq_tph_val;
u8 type;
@@ -880,13 +913,14 @@ enum i40iw_quad_hash_manage_type {
};
struct i40iw_qhash_table_info {
+ struct i40iw_sc_vsi *vsi;
enum i40iw_quad_hash_manage_type manage;
enum i40iw_quad_entry_type entry_type;
bool vlan_valid;
bool ipv4_valid;
u8 mac_addr[6];
u16 vlan_id;
- u16 qs_handle;
+ u8 user_pri;
u32 qp_num;
u32 dest_ip[4];
u32 src_ip[4];
@@ -976,7 +1010,7 @@ struct i40iw_cqp_query_fpm_values {
struct i40iw_cqp_ops {
enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
struct i40iw_cqp_init_info *);
- enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+ enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
void (*cqp_post_sq)(struct i40iw_sc_cqp *);
u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
@@ -1019,7 +1053,7 @@ struct i40iw_aeq_ops {
};
struct i40iw_pd_ops {
- void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+ void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
};
struct i40iw_priv_qp_ops {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
index 12acd688def4..57d3f1d11ff1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -39,8 +39,8 @@
#include <linux/types.h>
-#define I40IW_ABI_USERSPACE_VER 4
-#define I40IW_ABI_KERNEL_VER 4
+#define I40IW_ABI_VER 5
+
struct i40iw_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 4d28c3cb03cc..2800f796271c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -175,12 +175,10 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
if (!*wqe_idx)
qp->swqe_polarity = !qp->swqe_polarity;
}
-
- for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
- I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
- if (ret_code)
- return NULL;
- }
+ I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
+ wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
+ if (ret_code)
+ return NULL;
wqe = qp->sq_base[*wqe_idx].elem;
@@ -430,7 +428,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
struct i40iw_inline_rdma_write *op_info;
u64 *push;
u64 header = 0;
- u32 i, wqe_idx;
+ u32 wqe_idx;
enum i40iw_status_code ret_code;
bool read_fence = false;
u8 wqe_size;
@@ -465,14 +463,12 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
src = (u8 *)(op_info->data);
if (op_info->len <= 16) {
- for (i = 0; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len);
} else {
- for (i = 0; i < 16; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, 16);
+ src += 16;
dest = (u8 *)wqe + 32;
- for (; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len - 16);
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -507,7 +503,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
u8 *dest, *src;
struct i40iw_post_inline_send *op_info;
u64 header;
- u32 wqe_idx, i;
+ u32 wqe_idx;
enum i40iw_status_code ret_code;
bool read_fence = false;
u8 wqe_size;
@@ -540,14 +536,12 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
src = (u8 *)(op_info->data);
if (op_info->len <= 16) {
- for (i = 0; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len);
} else {
- for (i = 0; i < 16; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, 16);
+ src += 16;
dest = (u8 *)wqe + 32;
- for (; i < op_info->len; i++, src++, dest++)
- *dest = *src;
+ memcpy(dest, src, op_info->len - 16);
}
wmb(); /* make sure WQE is populated before valid bit is set */
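Both inline paths above swap the byte-wise copy loops for memcpy(), kept in two pieces because the inline payload is not contiguous in the WQE: the first 16 bytes go into the fragment area and the remainder continues at byte offset 32 of the WQE. A small sketch of that split copy (offsets mirror the hunk; the buffer names are illustrative):

#include <linux/string.h>
#include <linux/types.h>

/*
 * Sketch: copy 'len' bytes of inline data into a WQE whose inline area is
 * split - the first 16 bytes at 'frag', the remainder at wqe + 32.
 */
static void example_copy_inline(u8 *wqe, u8 *frag, const u8 *src, u32 len)
{
	if (len <= 16) {
		memcpy(frag, src, len);
	} else {
		memcpy(frag, src, 16);
		memcpy(wqe + 32, src + 16, len - 16);
	}
}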
@@ -972,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
-
qp->sq_base = info->sq;
qp->rq_base = info->rq;
qp->shadow_area = info->shadow_area;
@@ -1004,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (!qp->use_srq) {
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
- qp->rq_wqe_size = rqshift;
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+ switch (info->abi_ver) {
+ case 4:
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+ if (ret_code)
+ return ret_code;
+ break;
+ case 5: /* fallthrough until next ABI version */
+ default:
+ rqshift = I40IW_MAX_RQ_WQE_SHIFT;
+ break;
+ }
+ qp->rq_wqe_size = rqshift;
qp->rq_wqe_size_multiplier = 4 << rqshift;
}
qp->ops = iw_qp_uk_ops;
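The switch above keys the RQ WQE shift off the negotiated ABI version: ABI 4 userspace still expects the shift computed from rq_size and the fragment count, while ABI 5 and later pin it to I40IW_MAX_RQ_WQE_SHIFT so kernel and user libraries agree on the RQ stride. A compact sketch of that versioned selection (the legacy computation is passed in rather than recomputed):

#include <linux/types.h>

#define EXAMPLE_MAX_RQ_WQE_SHIFT 2	/* stands in for I40IW_MAX_RQ_WQE_SHIFT */

/* Sketch: choose the RQ WQE shift by negotiated ABI version. */
static u8 example_rq_wqe_shift(int abi_ver, u8 legacy_shift)
{
	switch (abi_ver) {
	case 4:
		return legacy_shift;	/* old userspace: shift computed from rq_size/frag count */
	case 5:				/* fallthrough until the next ABI bump */
	default:
		return EXAMPLE_MAX_RQ_WQE_SHIFT;
	}
}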
@@ -1190,12 +1191,8 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
if (data_size <= 16)
*wqe_size = I40IW_QP_WQE_MIN_SIZE;
- else if (data_size <= 48)
- *wqe_size = 64;
- else if (data_size <= 80)
- *wqe_size = 96;
else
- *wqe_size = 128;
+ *wqe_size = 64;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 276bcefffd7e..84be6f13b9c5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -72,12 +72,13 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 32,
- I40IW_QPCTX_ENCD_MAXIRD = 3,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
- I40IW_MAX_ORD_SIZE = 32,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
- I40IW_QP_CTX_SIZE = 248
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_QP_CTX_SIZE = 248,
+ I40IW_MAX_PDS = 32768
};
#define i40iw_handle void *
@@ -96,13 +97,8 @@ enum i40iw_device_capabilities_const {
#define i40iw_physical_fragment u64
#define i40iw_address_list u64 *
-#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key))
-
-#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) && 0x000000FF)
-
-#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) && 0xFFFFFF00) >> 8)
-
#define I40IW_MAX_MR_SIZE 0x10000000000L
+#define I40IW_MAX_RQ_WQE_SHIFT 2
struct i40iw_qp_uk;
struct i40iw_cq_uk;
@@ -411,7 +407,7 @@ struct i40iw_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
-
+ int abi_ver;
};
struct i40iw_cq_uk_init_info {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 6fd043b1d714..0f5d43d1f5fc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -153,6 +153,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
u32 local_ipaddr;
+ u32 action = I40IW_ARP_ADD;
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -164,44 +165,25 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
+ if (upper_dev)
+ local_ipaddr = ntohl(
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+ else
+ local_ipaddr = ntohl(ifa->ifa_address);
switch (event) {
case NETDEV_DOWN:
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- &local_ipaddr,
- true,
- I40IW_ARP_DELETE);
- return NOTIFY_OK;
+ action = I40IW_ARP_DELETE;
+ /* Fall through */
case NETDEV_UP:
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- &local_ipaddr,
- true,
- I40IW_ARP_ADD);
- break;
+ /* Fall through */
case NETDEV_CHANGEADDR:
- /* Add the address to the IP table */
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
- local_ipaddr = ntohl(ifa->ifa_address);
-
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
true,
- I40IW_ARP_ADD);
+ action);
+ i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
+ (action == I40IW_ARP_ADD) ? true : false);
break;
default:
break;
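The rewrite above collapses three nearly identical notifier branches: the address is resolved once before the switch, NETDEV_DOWN only flips the action to a delete, and all three events fall through to one i40iw_manage_arp_cache()/i40iw_if_notify() call. A skeleton of that fall-through shape; the enum and the update helper are illustrative stubs:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/types.h>

enum example_arp_action { EXAMPLE_ARP_ADD, EXAMPLE_ARP_DELETE };

/* Stand-in for the shared i40iw_manage_arp_cache()/i40iw_if_notify() call. */
static void example_update(u32 ipaddr, enum example_arp_action action)
{
	/* no-op in this sketch */
}

static int example_inetaddr_event(unsigned long event, u32 local_ipaddr)
{
	enum example_arp_action action = EXAMPLE_ARP_ADD;

	switch (event) {
	case NETDEV_DOWN:
		action = EXAMPLE_ARP_DELETE;
		/* Fall through */
	case NETDEV_UP:
		/* Fall through */
	case NETDEV_CHANGEADDR:
		example_update(local_ipaddr, action);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}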
@@ -225,6 +207,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
u32 local_ipaddr6[4];
+ u32 action = I40IW_ARP_ADD;
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -235,24 +218,21 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
switch (event) {
case NETDEV_DOWN:
- i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
- i40iw_manage_arp_cache(iwdev,
- netdev->dev_addr,
- local_ipaddr6,
- false,
- I40IW_ARP_DELETE);
- return NOTIFY_OK;
+ action = I40IW_ARP_DELETE;
+ /* Fall through */
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
- i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
local_ipaddr6,
false,
- I40IW_ARP_ADD);
+ action);
+ i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
+ (action == I40IW_ARP_ADD) ? true : false);
break;
default:
break;
@@ -392,6 +372,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_rem_devusecount(iwdev);
}
/**
@@ -415,7 +396,10 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
info->cqp_cmd, timeout_ret);
err_code = -ETIME;
- i40iw_request_reset(iwdev);
+ if (!iwdev->reset) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
goto done;
}
cqp_error = cqp_request->compl_info.error;
@@ -445,6 +429,11 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
struct cqp_commands_info *info = &cqp_request->info;
int err_code = 0;
+ if (iwdev->reset) {
+ i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+ return I40IW_ERR_CQP_COMPL_ERROR;
+ }
+
status = i40iw_process_cqp_cmd(dev, info);
if (status) {
i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
@@ -459,6 +448,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
}
/**
+ * i40iw_add_devusecount - add dev refcount
+ * @iwdev: dev for refcount
+ */
+void i40iw_add_devusecount(struct i40iw_device *iwdev)
+{
+ atomic64_inc(&iwdev->use_count);
+}
+
+/**
+ * i40iw_rem_devusecount - decrement refcount for dev
+ * @iwdev: device
+ */
+void i40iw_rem_devusecount(struct i40iw_device *iwdev)
+{
+ if (!atomic64_dec_and_test(&iwdev->use_count))
+ return;
+ wake_up(&iwdev->close_wq);
+}
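The two helpers above are the device-level half of the close synchronization added in this series: users of the device (for example QPs, via i40iw_free_qp()) hold a reference, and the last i40iw_rem_devusecount() wakes close_wq so a waiter can finish teardown. A minimal sketch of that refcount-plus-waitqueue pairing, with illustrative names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_dev {
	atomic64_t use_count;
	wait_queue_head_t close_wq;
};

static void example_dev_init(struct example_dev *dev)
{
	atomic64_set(&dev->use_count, 0);
	init_waitqueue_head(&dev->close_wq);
}

static void example_add_use(struct example_dev *dev)
{
	atomic64_inc(&dev->use_count);
}

static void example_rem_use(struct example_dev *dev)
{
	if (atomic64_dec_and_test(&dev->use_count))
		wake_up(&dev->close_wq);	/* last user gone; unblock close */
}

/* Close path: block until every user has dropped its reference. */
static void example_wait_for_users(struct example_dev *dev)
{
	wait_event(dev->close_wq, !atomic64_read(&dev->use_count));
}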
+
+/**
* i40iw_add_pdusecount - add pd refcount
* @iwpd: pd for refcount
*/
@@ -712,6 +721,51 @@ enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
}
/**
+ * i40iw_qp_suspend_resume - cqp command for suspend/resume
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ * @suspend: flag if suspend or resume
+ */
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
+ cqp_info->in.u.suspend_resume.cqp = cqp;
+ cqp_info->in.u.suspend_resume.qp = qp;
+ cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
+}
+
+/**
+ * i40iw_qp_mss_modify - modify mss for qp
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+ struct i40iw_modify_qp_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.mss_change = true;
+ info.new_mss = qp->vsi->mss;
+ i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+}
+
+/**
* i40iw_term_modify_qp - modify qp for term message
* @qp: hardware control qp
* @next_state: qp's next state
@@ -769,6 +823,7 @@ static void i40iw_terminate_timeout(unsigned long context)
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
+ i40iw_rem_ref(&iwqp->ibqp);
}
/**
@@ -780,6 +835,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
struct i40iw_qp *iwqp;
iwqp = (struct i40iw_qp *)qp->back_qp;
+ i40iw_add_ref(&iwqp->ibqp);
init_timer(&iwqp->terminate_timer);
iwqp->terminate_timer.function = i40iw_terminate_timeout;
iwqp->terminate_timer.expires = jiffies + HZ;
@@ -796,7 +852,8 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
struct i40iw_qp *iwqp;
iwqp = (struct i40iw_qp *)qp->back_qp;
- del_timer(&iwqp->terminate_timer);
+ if (del_timer(&iwqp->terminate_timer))
+ i40iw_rem_ref(&iwqp->ibqp);
}
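The terminate-timer hunks close a use-after-free window by tying a QP reference to the armed timer: the reference is taken before the timer is started, the handler drops it when it fires, and the cancel path drops it only when del_timer() actually removed a pending timer (otherwise the handler owns the drop). A self-contained sketch of that ownership rule using the old timer API, as this driver does; the object and refcount are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* Illustrative object kept alive by the armed-timer reference. */
struct example_obj {
	atomic_t refcnt;
	struct timer_list timer;
};

static void example_put(struct example_obj *obj)
{
	atomic_dec(&obj->refcnt);	/* real code would free on the last put */
}

static void example_timeout(unsigned long data)
{
	struct example_obj *obj = (struct example_obj *)data;

	/* ... timeout work ... */
	example_put(obj);		/* handler drops the armed-timer reference */
}

static void example_arm(struct example_obj *obj)
{
	atomic_inc(&obj->refcnt);	/* reference the handler will drop */
	init_timer(&obj->timer);
	obj->timer.function = example_timeout;
	obj->timer.data = (unsigned long)obj;
	mod_timer(&obj->timer, jiffies + HZ);
}

static void example_cancel(struct example_obj *obj)
{
	/* Nonzero: the pending timer was removed and example_timeout() will
	 * never run, so the reference is ours to drop; otherwise the handler
	 * has done (or is doing) it. */
	if (del_timer(&obj->timer))
		example_put(obj);
}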
/**
@@ -1011,6 +1068,116 @@ enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
}
/**
+ * i40iw_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Create QP fail");
+
+ return status;
+}
+
+/**
+ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_create_qp_info *qp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
+
+ cqp_info->cqp_cmd = OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP QP create fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_cq_wq_destroy(iwdev, cq);
+}
+
+/**
+ * i40iw_cqp_qp_destroy_cmd - destroy a qp via cqp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+
+ cqp_info->cqp_cmd = OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP QP_DESTROY fail");
+}
+
+
+/**
* i40iw_ieq_mpa_crc_ae - generate AE for crc error
* @dev: hardware control device structure
* @qp: hardware control qp
@@ -1208,7 +1375,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
buf->totallen = pkt_len + buf->maclen;
- if (info->payload_len < buf->totallen - 4) {
+ if (info->payload_len < buf->totallen) {
i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
info->payload_len, buf->totallen);
return I40IW_ERR_INVALID_SIZE;
@@ -1224,27 +1391,29 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
/**
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
*/
-static void i40iw_hw_stats_timeout(unsigned long dev)
+static void i40iw_hw_stats_timeout(unsigned long vsi)
{
- struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
- struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
- struct i40iw_dev_pestat *vf_devstat = NULL;
+ struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+ struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
+ struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
+ struct i40iw_vsi_pestat *vf_devstat = NULL;
u16 iw_vf_idx;
unsigned long flags;
/*PF*/
- pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+ i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);
+
for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
- spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+ spin_lock_irqsave(&pf_devstat->lock, flags);
if (pf_dev->vf_dev[iw_vf_idx]) {
if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
- vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
- vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+ vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
+ i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
}
}
- spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+ spin_unlock_irqrestore(&pf_devstat->lock, flags);
}
mod_timer(&pf_devstat->stats_timer,
@@ -1253,26 +1422,26 @@ static void i40iw_hw_stats_timeout(unsigned long dev)
/**
* i40iw_hw_stats_start_timer - Start periodic stats timer
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
*/
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = vsi->pestat;
init_timer(&devstat->stats_timer);
devstat->stats_timer.function = i40iw_hw_stats_timeout;
- devstat->stats_timer.data = (unsigned long)dev;
+ devstat->stats_timer.data = (unsigned long)vsi;
mod_timer(&devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
/**
- * i40iw_hw_stats_del_timer - Delete periodic stats timer
- * @dev: hardware control device structure
+ * i40iw_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to the vsi structure
*/
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = vsi->pestat;
del_timer_sync(&devstat->stats_timer);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 6329c971c22f..29e97df9e1a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -37,6 +37,7 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
+#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
@@ -67,13 +68,13 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->vendor_part_id = iwdev->ldev->pcidev->device;
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
- props->max_qp = iwdev->max_qp;
+ props->max_qp = iwdev->max_qp - iwdev->used_qps;
props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
- props->max_cq = iwdev->max_cq;
+ props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
- props->max_mr = iwdev->max_mr;
- props->max_pd = iwdev->max_pd;
+ props->max_mr = iwdev->max_mr - iwdev->used_mrs;
+ props->max_pd = iwdev->max_pd - iwdev->used_pds;
props->max_sge_rd = I40IW_MAX_SGE_RD;
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
@@ -144,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
if (ib_copy_from_udata(&req, udata, sizeof(req)))
return ERR_PTR(-EINVAL);
- if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
- i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
- req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+ if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
+ i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
return ERR_PTR(-EINVAL);
}
@@ -154,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
uresp.max_qps = iwdev->max_qp;
uresp.max_pds = iwdev->max_pd;
uresp.wq_size = iwdev->max_qp_wr * 2;
- uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+ uresp.kernel_ver = req.userspace_ver;
ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
if (!ucontext)
return ERR_PTR(-ENOMEM);
ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
kfree(ucontext);
@@ -254,7 +255,6 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
{
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
enum i40iw_status_code status;
if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -270,7 +270,7 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
cqp_info->post_sq = 1;
- cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
cqp_info->in.u.manage_push_page.info.free_page = 0;
cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -292,7 +292,6 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
{
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
- struct i40iw_sc_dev *dev = &iwdev->sc_dev;
enum i40iw_status_code status;
if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -307,7 +306,7 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
cqp_info->post_sq = 1;
cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
- cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
cqp_info->in.u.manage_push_page.info.free_page = 1;
cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -334,9 +333,13 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
+ struct i40iw_ucontext *ucontext;
u32 pd_id = 0;
int err;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
iwdev->max_pd, &pd_id, &iwdev->next_pd);
if (err) {
@@ -351,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
}
sc_pd = &iwpd->sc_pd;
- dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
if (context) {
+ ucontext = to_ucontext(context);
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
memset(&uresp, 0, sizeof(uresp));
uresp.pd_id = pd_id;
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
err = -EFAULT;
goto error;
}
+ } else {
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
}
i40iw_add_pdusecount(iwpd);
@@ -516,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
u32 sq_size, rq_size;
- u8 sqshift, rqshift;
+ u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
@@ -525,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
- if (!status)
- status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
-
if (status)
return -ENOMEM;
sqdepth = sq_size << sqshift;
- rqdepth = rq_size << rqshift;
+ rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
@@ -602,6 +605,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
struct i40iwarp_offload_info *iwarp_info;
unsigned long flags;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
if (init_attr->create_flags)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
@@ -610,11 +616,15 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
memset(&init_info, 0, sizeof(init_info));
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
+ init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.sq_size = sq_size;
init_info.qp_uk_init_info.rq_size = rq_size;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
@@ -774,6 +784,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
iwdev->qp_table[qp_num] = iwqp;
i40iw_add_pdusecount(iwqp->iwpd);
+ i40iw_add_devusecount(iwdev);
if (ibpd->uobject && udata) {
memset(&uresp, 0, sizeof(uresp));
uresp.actual_sq_size = sq_size;
@@ -815,8 +826,9 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
attr->qp_access_flags = 0;
attr->cap.max_send_wr = qp->qp_uk.sq_size;
attr->cap.max_recv_wr = qp->qp_uk.rq_size;
- attr->cap.max_recv_sge = 1;
attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+ attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
@@ -884,6 +896,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
+ if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
+ err = -EINVAL;
+ goto exit;
+ }
+
switch (attr->qp_state) {
case IB_QPS_INIT:
case IB_QPS_RTR:
@@ -944,7 +961,7 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto exit;
}
if (iwqp->sc_qp.term_flags)
- del_timer(&iwqp->terminate_timer);
+ i40iw_terminate_del_timer(&iwqp->sc_qp);
info.next_iwarp_state = I40IW_QP_STATE_ERROR;
if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
iwdev->iw_status &&
@@ -1037,11 +1054,11 @@ static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
}
/**
- * cq_wq_destroy - send cq destroy cqp
+ * i40iw_cq_wq_destroy - send cq destroy cqp
* @iwdev: iwarp device
* @cq: hardware control cq
*/
-static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
enum i40iw_status_code status;
struct i40iw_cqp_request *cqp_request;
@@ -1080,9 +1097,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
iwcq = to_iwcq(ib_cq);
iwdev = to_iwdev(ib_cq->device);
cq = &iwcq->sc_cq;
- cq_wq_destroy(iwdev, cq);
+ i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources(iwdev, iwcq);
kfree(iwcq);
+ i40iw_rem_devusecount(iwdev);
return 0;
}
@@ -1113,6 +1131,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
int err_code;
int entries = attr->cqe;
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
+
if (entries > iwdev->max_cqe)
return ERR_PTR(-EINVAL);
@@ -1137,7 +1158,8 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
ukinfo->cq_id = cq_num;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
info.ceqe_mask = 0;
- info.ceq_id = 0;
+ if (attr->comp_vector < iwdev->ceqs_count)
+ info.ceq_id = attr->comp_vector;
info.ceq_id_valid = true;
info.ceqe_mask = 1;
info.type = I40IW_CQ_TYPE_IWARP;
@@ -1229,10 +1251,11 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
}
}
+ i40iw_add_devusecount(iwdev);
return (struct ib_cq *)iwcq;
cq_destroy:
- cq_wq_destroy(iwdev, cq);
+ i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
cq_free_resources(iwdev, iwcq);
error:
@@ -1266,6 +1289,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+ i40iw_rem_devusecount(iwdev);
}
/**
@@ -1296,19 +1320,18 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
stag |= driver_key;
stag += (u32)consumer_key;
+ i40iw_add_devusecount(iwdev);
}
return stag;
}
/**
* i40iw_next_pbl_addr - Get next pbl address
- * @palloc: Poiner to allocated pbles
* @pbl: pointer to a pble
* @pinfo: info pointer
* @idx: index
*/
-static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
- u64 *pbl,
+static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
struct i40iw_pble_info **pinfo,
u32 *idx)
{
@@ -1336,9 +1359,11 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
struct i40iw_pble_info *pinfo;
struct scatterlist *sg;
+ u64 pg_addr = 0;
u32 idx = 0;
pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+
pg_shift = ffs(region->page_size) - 1;
for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
chunk_pages = sg_dma_len(sg) >> pg_shift;
@@ -1346,17 +1371,96 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
!iwpbl->qp_mr.sq_page)
iwpbl->qp_mr.sq_page = sg_page(sg);
for (i = 0; i < chunk_pages; i++) {
- *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
- pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+ pg_addr = sg_dma_address(sg) + region->page_size * i;
+
+ if ((entry + i) == 0)
+ *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+ else if (!(pg_addr & ~iwmr->page_msk))
+ *pbl = cpu_to_le64(pg_addr);
+ else
+ continue;
+ pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
+ }
+ }
+}
+
+/**
+ * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
+ * @addr: virtual address
+ * @iwmr: mr pointer for this memory registration
+ */
+static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
+{
+ struct vm_area_struct *vma;
+ struct hstate *h;
+
+ vma = find_vma(current->mm, addr);
+ if (vma && is_vm_hugetlb_page(vma)) {
+ h = hstate_vma(vma);
+ if (huge_page_size(h) == 0x200000) {
+ iwmr->page_size = huge_page_size(h);
+ iwmr->page_msk = huge_page_mask(h);
}
}
}
/**
+ * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ *
+ */
+static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+ u32 pg_idx;
+
+ for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+ if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ return false;
+ }
+ return true;
+}
+
+/**
+ * i40iw_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
+{
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *leaf = lvl2->leaf;
+ u64 *arr = NULL;
+ u64 *start_addr = NULL;
+ int i;
+ bool ret;
+
+ if (palloc->level == I40IW_LEVEL_1) {
+ arr = (u64 *)palloc->level1.addr;
+ ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
+ return ret;
+ }
+
+ start_addr = (u64 *)leaf->addr;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ arr = (u64 *)leaf->addr;
+ if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+ return false;
+ ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+
+/**
* i40iw_setup_pbles - copy user pg address to pble's
* @iwdev: iwarp device
* @iwmr: mr pointer for this memory registration
- * @use_pbles: flag if to use pble's or memory (level 0)
+ * @use_pbles: flag to use pbles
*/
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
struct i40iw_mr *iwmr,
@@ -1369,9 +1473,6 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
enum i40iw_status_code status;
enum i40iw_pble_level level = I40IW_LEVEL_1;
- if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
- return -ENOMEM;
-
if (use_pbles) {
mutex_lock(&iwdev->pbl_mutex);
status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
@@ -1388,6 +1489,10 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
}
i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+
+ if (use_pbles)
+ iwmr->pgaddrmem[0] = *pbl;
+
return 0;
}
@@ -1409,14 +1514,18 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
struct i40iw_hmc_pble *hmc_p;
u64 *arr = iwmr->pgaddrmem;
+ u32 pg_size;
int err;
int total;
+ bool ret = true;
total = req->sq_pages + req->rq_pages + req->cq_pages;
+ pg_size = iwmr->page_size;
err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
if (err)
return err;
+
if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
i40iw_free_pble(iwdev->pble_rsrc, palloc);
iwpbl->pbl_allocated = false;
@@ -1425,26 +1534,44 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
if (use_pbles)
arr = (u64 *)palloc->level1.addr;
- if (req->reg_type == IW_MEMREG_TYPE_QP) {
+
+ if (iwmr->type == IW_MEMREG_TYPE_QP) {
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
+
if (use_pbles) {
+ ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
+ if (ret)
+ ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
+ }
+
+ if (!ret) {
hmc_p->idx = palloc->level1.idx;
hmc_p = &qpmr->rq_pbl;
hmc_p->idx = palloc->level1.idx + req->sq_pages;
} else {
hmc_p->addr = arr[0];
hmc_p = &qpmr->rq_pbl;
- hmc_p->addr = arr[1];
+ hmc_p->addr = arr[req->sq_pages];
}
} else { /* CQ */
hmc_p = &cqmr->cq_pbl;
cqmr->shadow = (dma_addr_t)arr[total];
+
if (use_pbles)
+ ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
+
+ if (!ret)
hmc_p->idx = palloc->level1.idx;
else
hmc_p->addr = arr[0];
}
+
+ if (use_pbles && ret) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+
return err;
}
@@ -1642,8 +1769,9 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
stag_info->access_rights = access;
stag_info->pd_id = iwpd->sc_pd.pd_id;
stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+ stag_info->page_size = iwmr->page_size;
- if (iwmr->page_cnt > 1) {
+ if (iwpbl->pbl_allocated) {
if (palloc->level == I40IW_LEVEL_1) {
stag_info->first_pm_pbl_index = palloc->level1.idx;
stag_info->chunk_size = 1;
@@ -1699,6 +1827,11 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
bool use_pbles = false;
unsigned long flags;
int err = -ENOSYS;
+ int ret;
+ int pg_shift;
+
+ if (iwdev->closing)
+ return ERR_PTR(-ENODEV);
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
@@ -1723,9 +1856,17 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
ucontext = to_ucontext(pd->uobject->context);
- region_length = region->length + (start & 0xfff);
- pbl_depth = region_length >> 12;
- pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+
+ iwmr->page_size = region->page_size;
+ iwmr->page_msk = PAGE_MASK;
+
+ if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
+ i40iw_set_hugetlb_values(start, iwmr);
+
+ region_length = region->length + (start & (iwmr->page_size - 1));
+ pg_shift = ffs(iwmr->page_size) - 1;
+ pbl_depth = region_length >> pg_shift;
+ pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
iwmr->length = region->length;
iwpbl->user_base = virt;
@@ -1755,13 +1896,21 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_MEM:
+ use_pbles = (iwmr->page_cnt != 1);
access = I40IW_ACCESS_FLAGS_LOCALREAD;
- use_pbles = (iwmr->page_cnt != 1);
err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
if (err)
goto error;
+ if (use_pbles) {
+ ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
+ if (ret) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
access |= i40iw_get_user_access(acc);
stag = i40iw_create_stag(iwdev);
if (!stag) {
@@ -1778,6 +1927,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
i40iw_free_stag(iwdev, stag);
goto error;
}
+
break;
default:
goto error;
@@ -1789,7 +1939,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
return &iwmr->ibmr;
error:
- if (palloc->level != I40IW_LEVEL_0)
+ if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
ib_umem_release(region);
kfree(iwmr);
@@ -2142,7 +2292,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
case IB_WR_REG_MR:
{
struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
- int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
int flags = reg_wr(ib_wr)->access;
struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
@@ -2153,6 +2302,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.access_rights |= i40iw_get_user_access(flags);
info.stag_key = reg_wr(ib_wr)->key & 0xff;
info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ info.page_size = reg_wr(ib_wr)->mr->page_size;
info.wr_id = ib_wr->wr_id;
info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
@@ -2166,9 +2316,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
info.chunk_size = 1;
- if (page_shift == 21)
- info.page_size = 1; /* 2M page */
-
ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
if (ret)
err = -ENOMEM;
@@ -2487,21 +2634,17 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- unsigned long flags;
if (dev->is_pf) {
- spin_lock_irqsave(&devstat->stats_lock, flags);
- devstat->ops.iw_hw_stat_read_all(devstat,
- &devstat->hw_stats);
- spin_unlock_irqrestore(&devstat->stats_lock, flags);
+ i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
} else {
if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
return -ENOSYS;
}
- memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
+ memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
return stats->num_counters;
}
@@ -2562,7 +2705,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
* @ah_attr: address handle attributes
*/
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
- struct ib_ah_attr *attr)
+ struct ib_ah_attr *attr,
+ struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
@@ -2621,7 +2766,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
(1ull << IB_USER_VERBS_CMD_POST_RECV) |
(1ull << IB_USER_VERBS_CMD_POST_SEND);
iwibdev->ibdev.phys_port_cnt = 1;
- iwibdev->ibdev.num_comp_vectors = 1;
+ iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
iwibdev->ibdev.dma_device = &pcidev->dev;
iwibdev->ibdev.dev.parent = &pcidev->dev;
iwibdev->ibdev.query_port = i40iw_query_port;
@@ -2654,7 +2799,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
if (!iwibdev->ibdev.iwcm) {
ib_dealloc_device(&iwibdev->ibdev);
- i40iw_pr_err("iwcm == NULL\n");
return NULL;
}
@@ -2719,6 +2863,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
i40iw_unregister_rdma_device(iwibdev);
kfree(iwibdev->ibdev.iwcm);
iwibdev->ibdev.iwcm = NULL;
+ wait_event_timeout(iwibdev->iwdev->close_wq,
+ !atomic64_read(&iwibdev->iwdev->use_count),
+ I40IW_EVENT_TIMEOUT);
ib_dealloc_device(&iwibdev->ibdev);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 0069be8a5a38..07c3fec77de6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -42,6 +42,7 @@ struct i40iw_ucontext {
spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+ int abi_ver;
};
struct i40iw_pd {
@@ -92,6 +93,8 @@ struct i40iw_mr {
struct ib_umem *region;
u16 type;
u32 page_cnt;
+ u32 page_size;
+ u64 page_msk;
u32 npages;
u32 stag;
u64 length;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 3041003c94d2..f4d13683a403 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -403,6 +403,19 @@ del_out:
}
/**
+ * i40iw_vf_init_pestat - Initialize stats for VF
+ * @dev: pointer to the VF device
+ * @stats: Statistics structure pointer
+ * @index: Stats index
+ */
+static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
+{
+ stats->hw = dev->hw;
+ i40iw_hw_stats_init(stats, (u8)index, false);
+ spin_lock_init(&stats->lock);
+}
+
+/**
* i40iw_vchnl_recv_pf - Receive PF virtual channel messages
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
@@ -421,9 +434,8 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
struct i40iw_virt_mem vf_dev_mem;
struct i40iw_virtchnl_work_info work_info;
- struct i40iw_dev_pestat *devstat;
+ struct i40iw_vsi_pestat *stats;
enum i40iw_status_code ret_code;
- unsigned long flags;
if (!dev || !msg || !len)
return I40IW_ERR_PARAM;
@@ -496,14 +508,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"VF%u error CQP HMC Function operation.\n",
vf_id);
- ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
- if (ret_code)
- i40iw_debug(dev, I40IW_DEBUG_VIRT,
- "VF%u - i40iw_device_init_pestat failed\n",
- vf_id);
- vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
- (u8)vf_dev->pmf_index,
- dev->hw, false);
+ i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
vf_dev->stats_initialized = true;
} else {
if (vf_dev) {
@@ -534,12 +539,10 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
case I40IW_VCHNL_OP_GET_STATS:
if (!vf_dev)
return I40IW_ERR_BAD_PTR;
- devstat = &vf_dev->dev_pestat;
- spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
- devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
- spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ stats = &vf_dev->pestat;
+ i40iw_hw_stats_read_all(stats, &stats->hw_stats);
vf_dev->msg_count--;
- vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
+ vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
break;
default:
i40iw_debug(dev, I40IW_DEBUG_VIRT,
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index b9bf0759f10a..077c33d2dc75 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
!(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
--ah->av.eth.stat_rate;
}
-
+ ah->av.eth.sl_tclass_flowlabel |=
+ cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label);
/*
* HW requires multicast LID so we just choose one.
*/
@@ -122,12 +124,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct mlx4_ib_ah *ah;
struct ib_ah *ret;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 5e9939045852..06020c54db20 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
rec = kzalloc(sizeof *rec, GFP_KERNEL);
- if (!rec) {
- pr_err("alias_guid_work: No Memory\n");
+ if (!rec)
return;
- }
pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a488889fc7..d64845335e87 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -247,10 +247,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
- if (!ent) {
- mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
+ if (!ent)
return ERR_PTR(-ENOMEM);
- }
ent->sl_cm_id = sl_cm_id;
ent->slave_id = slave_id;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 1672907ff219..db564ccc0f92 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -39,6 +39,8 @@
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"
@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
return -EINVAL;
}
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+ enum rdma_network_type net_type;
+
+ if (version == 4)
+ net_type = RDMA_NETWORK_IPV4;
+ else if (version == 6)
+ net_type = RDMA_NETWORK_IPV6;
+ else
+ return -EINVAL;
+
+ return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+ sgid, dgid);
+}
+
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
memset(&attr, 0, sizeof attr);
attr.port_num = port;
if (is_eth) {
- memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+ union ib_gid sgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+ return -EINVAL;
attr.ah_flags = IB_AH_GRH;
}
ah = ib_create_ah(tun_ctx->pd, &attr);
@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
is_eth = 1;
if (is_eth) {
+ union ib_gid dgid;
+ union ib_gid sgid;
+
+ if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+ return -EINVAL;
if (!(wc->wc_flags & IB_WC_GRH)) {
mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
return -EINVAL;
@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
return -EINVAL;
}
- err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+ err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
if (err && mlx4_is_mf_bonded(dev->dev)) {
other_port = (port == 1) ? 2 : 1;
- err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+ err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
if (!err) {
port = other_port;
pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
/* If a grh is present, we demux according to it */
if (wc->wc_flags & IB_WC_GRH) {
- slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
- if (slave < 0) {
- mlx4_ib_warn(ibdev, "failed matching grh\n");
- return -ENOENT;
+ if (grh->dgid.global.interface_id ==
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+ grh->dgid.global.subnet_prefix == cpu_to_be64(
+ atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+ slave = 0;
+ } else {
+ slave = mlx4_ib_find_real_gid(ibdev, port,
+ grh->dgid.global.interface_id);
+ if (slave < 0) {
+ mlx4_ib_warn(ibdev, "failed matching grh\n");
+ return -ENOENT;
+ }
}
}
/* Class-specific handling */
@@ -1102,10 +1137,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
- if (!in_mad || !out_mad) {
- mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+ if (!in_mad || !out_mad)
goto out;
- }
guid_tbl_blk_num *= 4;
@@ -1916,11 +1949,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
*ret_ctx = NULL;
ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
- if (!ctx) {
- pr_err("failed allocating pv resource context "
- "for port %d, slave %d\n", port, slave);
+ if (!ctx)
return -ENOMEM;
- }
ctx->ib_dev = &dev->ib_dev;
ctx->port = port;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b597e8227591..7031a8dd4d14 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -430,7 +430,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
@@ -455,6 +455,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.response_length);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ err = -ENOMEM;
if (!in_mad || !out_mad)
goto out;
@@ -547,6 +548,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+ props->max_ah = INT_MAX;
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -697,9 +699,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
if (err)
goto out;
- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- props->active_speed = IB_SPEED_QDR;
+ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
+ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+ IB_SPEED_FDR : IB_SPEED_QDR;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
@@ -1678,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
size += ret;
}
+ if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+ flow_attr->num_of_specs == 1) {
+ struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+ enum ib_flow_spec_type header_spec =
+ ((union ib_flow_spec *)(flow_attr + 1))->type;
+
+ if (header_spec == IB_FLOW_SPEC_ETH)
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+ }
+
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (ret == -ENOMEM)
pr_err("mcg table is full. Fail to register network rule.\n");
else if (ret == -ENXIO)
@@ -1697,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
@@ -2814,20 +2828,22 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
sizeof(long),
GFP_KERNEL);
- if (!ibdev->ib_uc_qpns_bitmap) {
- dev_err(&dev->persist->pdev->dev,
- "bit map alloc failed\n");
+ if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
- }
-
- bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
- err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
- dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_base +
- ibdev->steer_qpn_count - 1);
- if (err)
- goto err_steer_free_bitmap;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+ bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+ dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_base +
+ ibdev->steer_qpn_count - 1);
+ if (err)
+ goto err_steer_free_bitmap;
+ } else {
+ bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+ ibdev->steer_qpn_count);
+ }
}
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
@@ -3055,15 +3071,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
- if (!dm) {
- pr_err("failed to allocate memory for tunneling qp update\n");
+ if (!dm)
return;
- }
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
- pr_err("failed to allocate memory for tunneling qp update work struct\n");
while (--i >= 0)
kfree(dm[i]);
goto out;
@@ -3223,8 +3236,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
ew->port = port;
ew->ib_dev = ibdev;
queue_work(wq, &ew->work);
- } else {
- pr_err("failed to allocate memory for sl2vl update work\n");
}
}
@@ -3284,10 +3295,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
- if (!ew) {
- pr_err("failed to allocate memory for events work\n");
+ if (!ew)
break;
- }
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index a21d37f02f35..e010fe459e67 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1142,7 +1142,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
work = kmalloc(sizeof *work, GFP_KERNEL);
if (!work) {
ctx->flushing = 0;
- mcg_warn("failed allocating work for cleanup\n");
return;
}
@@ -1202,10 +1201,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave)
return 0;
req = kzalloc(sizeof *req, GFP_KERNEL);
- if (!req) {
- mcg_warn_group(group, "failed allocation - may leave stall groups\n");
+ if (!req)
return -ENOMEM;
- }
if (!list_empty(&group->func[slave].pending)) {
pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 35141f451e5c..7f3d976d81ed 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 570bc866b1d6..c068add8838b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
int qpn;
int err;
struct ib_qp_cap backup_cap;
- struct mlx4_ib_sqp *sqp;
+ struct mlx4_ib_sqp *sqp = NULL;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
@@ -933,7 +933,9 @@ err_db:
mlx4_db_free(dev->dev, &qp->db);
err:
- if (!*caller_qp)
+ if (sqp)
+ kfree(sqp);
+ else if (!*caller_qp)
kfree(qp);
return err;
}
@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
- if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+ dev->qp1_proxy[mqp->port - 1] == mqp) {
mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
dev->qp1_proxy[mqp->port - 1] = NULL;
mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
union ib_gid gid;
- struct ib_gid_attr gid_attr;
+ struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
u16 vlan = 0xffff;
u8 smac[ETH_ALEN];
int status = 0;
int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
attr->ah_attr.ah_flags & IB_AH_GRH;
- if (is_eth) {
+ if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
int index = attr->ah_attr.grh.sgid_index;
status = ib_get_cached_gid(ibqp->device, port_num,
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 745efa4cfc71..d090e96f6f01 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
return &ah->ibah;
}
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct mlx5_ib_ah *ah;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
+ if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+ int err;
+ struct mlx5_ib_create_ah_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dmac) +
+ sizeof(resp.dmac);
+
+ if (udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ resp.response_length = min_resp_len;
+
+ err = ib_resolve_eth_dmac(pd->device, ah_attr);
+ if (err)
+ return ERR_PTR(err);
+
+ memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ return ERR_PTR(err);
+ }
+
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fcd04b881ec1..b3ef47c3ab73 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
- struct mlx5_ib_create_cq ucmd;
+ struct mlx5_ib_create_cq ucmd = {};
size_t ucmdlen;
int page_shift;
__be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (err)
goto err_umem;
- mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+ mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->uuari.uars[0].index;
+ if (ucmd.cqe_comp_en == 1) {
+ if (unlikely((*cqe_size != 64) ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ if (unlikely(!ucmd.cqe_comp_res_format ||
+ !(ucmd.cqe_comp_res_format <
+ MLX5_IB_CQE_RES_RESERVED) ||
+ (ucmd.cqe_comp_res_format &
+ (ucmd.cqe_comp_res_format - 1)))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+ ucmd.cqe_comp_res_format);
+ goto err_cqb;
+ }
+
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format,
+ ilog2(ucmd.cqe_comp_res_format));
+ }
+
return 0;
+err_cqb:
+ kfree(cqb);
+
err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
@@ -1124,7 +1152,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
return err;
}
- mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+ mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
npas, NULL);
cq->resize_umem = umem;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2be65ddf56ba..d566f6738833 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
&& ibdev->ib_active) {
- struct ib_event ibev = {0};
+ struct ib_event ibev = { };
ibev.device = &ibdev->ib_dev;
ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
+ int max_sq_desc;
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
- sizeof(struct mlx5_wqe_ctrl_seg)) /
- sizeof(struct mlx5_wqe_data_seg);
+ max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+ max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+ props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
}
+ if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+ uhw->outlen)) {
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+ resp.response_length +=
+ sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ }
+
+ if (field_avail(typeof(resp), reserved, uhw->outlen))
+ resp.response_length += sizeof(resp.reserved);
+
+ if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+ resp.response_length += sizeof(resp.cqe_comp_caps);
+ }
+
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+ MLX5_CAP_GEN(mdev, qos)) {
+ resp.packet_pacing_caps.qp_rate_limit_max =
+ MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+ resp.packet_pacing_caps.qp_rate_limit_min =
+ MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+ resp.packet_pacing_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ }
+ resp.response_length += sizeof(resp.packet_pacing_caps);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_version);
if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
- resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+ resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+ MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}
+static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+ bool inner)
+{
+ if (inner) {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, inner_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, inner_ipv6_flow_label, val);
+ } else {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, outer_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, outer_ipv6_flow_label, val);
+ }
+}
+
static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
static int parse_flow_attr(u32 *match_c, u32 *match_v,
const union ib_flow_spec *ib_spec)
{
- void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
misc_parameters);
+ void *headers_c;
+ void *headers_v;
+
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ inner_headers);
+ } else {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ }
- switch (ib_spec->type) {
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
return -ENOTSUPP;
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
ib_spec->eth.mask.dst_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
ib_spec->eth.mask.src_mac);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
ib_spec->eth.val.src_mac);
if (ib_spec->eth.mask.vlan_tag) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_vid, ntohs(ib_spec->eth.val.vlan_tag));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_cfi,
ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_cfi,
ntohs(ib_spec->eth.val.vlan_tag) >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_prio,
ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_prio,
ntohs(ib_spec->eth.val.vlan_tag) >> 13);
}
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, ntohs(ib_spec->eth.mask.ether_type));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ntohs(ib_spec->eth.val.ether_type));
break;
case IB_FLOW_SPEC_IPV4:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IP);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.src_ip,
sizeof(ib_spec->ipv4.mask.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.src_ip,
sizeof(ib_spec->ipv4.val.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.dst_ip,
sizeof(ib_spec->ipv4.mask.dst_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
- set_tos(outer_headers_c, outer_headers_v,
+ set_tos(headers_c, headers_v,
ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
- set_proto(outer_headers_c, outer_headers_v,
+ set_proto(headers_c, headers_v,
ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
break;
case IB_FLOW_SPEC_IPV6:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IPV6);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.src_ip,
sizeof(ib_spec->ipv6.mask.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.src_ip,
sizeof(ib_spec->ipv6.val.src_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.dst_ip,
sizeof(ib_spec->ipv6.mask.dst_ip));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.dst_ip,
sizeof(ib_spec->ipv6.val.dst_ip));
- set_tos(outer_headers_c, outer_headers_v,
+ set_tos(headers_c, headers_v,
ib_spec->ipv6.mask.traffic_class,
ib_spec->ipv6.val.traffic_class);
- set_proto(outer_headers_c, outer_headers_v,
+ set_proto(headers_c, headers_v,
ib_spec->ipv6.mask.next_hdr,
ib_spec->ipv6.val.next_hdr);
- MLX5_SET(fte_match_set_misc, misc_params_c,
- outer_ipv6_flow_label,
- ntohl(ib_spec->ipv6.mask.flow_label));
- MLX5_SET(fte_match_set_misc, misc_params_v,
- outer_ipv6_flow_label,
- ntohl(ib_spec->ipv6.val.flow_label));
+ set_flow_label(misc_params_c, misc_params_v,
+ ntohl(ib_spec->ipv6.mask.flow_label),
+ ntohl(ib_spec->ipv6.val.flow_label),
+ ib_spec->type & IB_FLOW_SPEC_INNER);
+
break;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_TCP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_UDP:
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+ LAST_TUNNEL_FIELD))
+ return -ENOTSUPP;
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+ ntohl(ib_spec->tunnel.mask.tunnel_id));
+ MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+ ntohl(ib_spec->tunnel.val.tunnel_id));
+ break;
default:
return -EINVAL;
}
@@ -2721,6 +2795,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
int err;
err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2730,7 +2806,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev);
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -2744,7 +2821,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
-static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2773,7 +2850,7 @@ err_destroy_vport_lag:
return err;
}
-static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -2785,7 +2862,21 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ dev->roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce.nb);
+ if (err) {
+ dev->roce.nb.notifier_call = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
if (dev->roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->roce.nb);
@@ -2793,39 +2884,40 @@ static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
- if (err) {
- dev->roce.nb.notifier_call = NULL;
+ err = mlx5_add_netdev_notifier(dev);
+ if (err)
return err;
- }
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- goto err_unregister_netdevice_notifier;
+ if (MLX5_CAP_GEN(dev->mdev, roce)) {
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ goto err_unregister_netdevice_notifier;
+ }
- err = mlx5_roce_lag_init(dev);
+ err = mlx5_eth_lag_init(dev);
if (err)
goto err_disable_roce;
return 0;
err_disable_roce:
- mlx5_nic_vport_disable_roce(dev->mdev);
+ if (MLX5_CAP_GEN(dev->mdev, roce))
+ mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_roce_notifier(dev);
+ mlx5_remove_netdev_notifier(dev);
return err;
}
-static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
- mlx5_roce_lag_cleanup(dev);
- mlx5_nic_vport_disable_roce(dev->mdev);
+ mlx5_eth_lag_cleanup(dev);
+ if (MLX5_CAP_GEN(dev->mdev, roce))
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2947,9 +3039,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
- if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
- return NULL;
-
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -2995,6 +3084,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
@@ -3017,7 +3108,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.uverbs_ex_cmd_mask =
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -3128,14 +3220,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_roce(dev);
+ err = mlx5_enable_eth(dev);
if (err)
goto err_free_port;
}
err = create_dev_resources(&dev->devr);
if (err)
- goto err_disable_roce;
+ goto err_disable_eth;
err = mlx5_ib_odp_init_one(dev);
if (err)
@@ -3179,10 +3271,10 @@ err_odp:
err_rsrc:
destroy_dev_resources(&dev->devr);
-err_disable_roce:
+err_disable_eth:
if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_roce(dev);
- mlx5_remove_roce_notifier(dev);
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev);
}
err_free_port:
@@ -3199,14 +3291,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
- mlx5_remove_roce_notifier(dev);
+ mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_roce(dev);
+ mlx5_disable_eth(dev);
kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}
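
The main.c hunks above rename the RoCE setup helpers to *_eth and gate the RoCE-specific vport calls on MLX5_CAP_GEN(mdev, roce), so an Ethernet port without RoCE support still gets its netdev notifier and LAG initialization; the error unwind mirrors the setup order. A minimal standalone sketch of that gated setup/teardown ordering (the helper names below are illustrative stand-ins, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool roce_capable = false;   /* stands in for MLX5_CAP_GEN(mdev, roce) */

    static int add_notifier(void)      { puts("notifier registered"); return 0; }
    static void remove_notifier(void)  { puts("notifier removed"); }
    static int enable_roce(void)       { puts("roce enabled"); return 0; }
    static void disable_roce(void)     { puts("roce disabled"); }
    static int lag_init(void)          { puts("lag initialized"); return 0; }

    static int enable_eth(void)
    {
        int err = add_notifier();
        if (err)
            return err;

        if (roce_capable) {             /* only touch RoCE state when the cap is set */
            err = enable_roce();
            if (err)
                goto err_notifier;
        }

        err = lag_init();
        if (err)
            goto err_roce;
        return 0;

    err_roce:
        if (roce_capable)
            disable_roce();
    err_notifier:
        remove_notifier();
        return err;
    }

    int main(void)
    {
        return enable_eth();
    }
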
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 996b54e366b0..6851357c16f4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -37,12 +37,15 @@
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
+ * @max_page_shift: high limit for page_shift - 0 means no limit
* @count: number of PAGE_SIZE pages covered by umem
* @shift: page shift for the compound pages found in the region
* @ncont: number of compound pages
* @order: log2 of the number of compound pages
*/
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+ unsigned long max_page_shift,
+ int *count, int *shift,
int *ncont, int *order)
{
unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, BITS_PER_LONG);
+ if (max_page_shift)
+ m = min_t(unsigned long, max_page_shift - page_shift, m);
skip = 1 << m;
mask = skip - 1;
i = 0;
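
mlx5_ib_cont_pages() now takes a max_page_shift cap: the run of zero low-order address bits that would normally be folded into a larger compound page is clamped so page_shift plus that run never exceeds the hardware field width (0 means no cap). A standalone sketch of the clamp, assuming max_page_shift >= page_shift when non-zero (the function below is illustrative, not the driver's):

    #include <stdio.h>

    /* Count how many low-order zero bits of addr can be folded into a larger
     * page, then clamp so page_shift + m never exceeds max_page_shift
     * (0 means "no limit"), mirroring the clamp added above. */
    static unsigned int cont_bits(unsigned long addr, unsigned int page_shift,
                                  unsigned int max_page_shift)
    {
        unsigned long tmp = addr >> page_shift;
        unsigned int m = tmp ? (unsigned int)__builtin_ctzl(tmp)
                             : (unsigned int)(8 * sizeof(tmp));

        if (max_page_shift && max_page_shift - page_shift < m)
            m = max_page_shift - page_shift;
        return m;
    }

    int main(void)
    {
        /* 2 MB-aligned address, 4 KB base pages, cap at shift 18 (256 KB) */
        printf("%u\n", cont_bits(0x200000, 12, 18)); /* prints 6, not 9 */
        return 0;
    }
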
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 854748b61212..6c6057eb60ea 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
+#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
@@ -387,6 +389,7 @@ struct mlx5_ib_qp {
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
+ u32 rate_limit;
};
struct mlx5_ib_cq_buf {
@@ -418,7 +421,7 @@ struct mlx5_umr_wr {
struct ib_pd *pd;
unsigned int page_shift;
unsigned int npages;
- u32 length;
+ u64 length;
int access_flags;
u32 mkey;
};
@@ -739,7 +742,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -825,7 +829,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+ unsigned long max_page_shift,
+ int *count, int *shift,
int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, size_t offset, size_t num_pages,
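
In mlx5_ib.h the UMR work-request length field widens from u32 to u64, so a registration length above 4 GB is not truncated when it is carried through the UMR path. A tiny demonstration of the truncation the wider field avoids:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t region = 6ULL << 30;        /* a 6 GB registration */
        uint32_t narrow = (uint32_t)region;  /* old u32 field truncates */
        uint64_t wide = region;              /* new u64 field keeps it intact */

        printf("u32: %u bytes, u64: %llu bytes\n",
               narrow, (unsigned long long)wide);
        return 0;
    }
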
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4e9012463c37..8f608debe141 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
- if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+ if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ (mlx5_core_is_pf(dev->mdev)))
limit = dev->mdev->profile->mr_cache[i].limit;
else
limit = 0;
@@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
+static void wait_for_async_commands(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int total = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ for (j = 0 ; j < 1000; j++) {
+ if (!ent->pending)
+ break;
+ msleep(50);
+ }
+ }
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ total += ent->pending;
+ }
+
+ if (total)
+ mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
+ else
+ mlx5_ib_warn(dev, "done with all pending requests\n");
+}
+
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
+ wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;
@@ -816,29 +845,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
umrwr->mkey = key;
}
-static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
- int access_flags, int *npages,
- int *page_shift, int *ncont, int *order)
+static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem,
+ int *npages, int *page_shift, int *ncont,
+ int *order)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
- if (IS_ERR(umem)) {
+ int err;
+
+ *umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ err = PTR_ERR_OR_ZERO(*umem);
+ if (err < 0) {
mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
- return (void *)umem;
+ return err;
}
- mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+ mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
- ib_umem_release(umem);
- return ERR_PTR(-EINVAL);
+ ib_umem_release(*umem);
+ return -EINVAL;
}
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
*npages, *ncont, *order, *page_shift);
- return umem;
+ return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1164,11 +1198,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- umem = mr_umem_get(pd, start, length, access_flags, &npages,
+ err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
- if (IS_ERR(umem))
- return (void *)umem;
+ if (err < 0)
+ return ERR_PTR(err);
if (use_umr(order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1345,10 +1379,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
- mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
- &page_shift, &ncont, &order);
- if (IS_ERR(mr->umem)) {
- err = PTR_ERR(mr->umem);
+ err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
+ if (err < 0) {
mr->umem = NULL;
return err;
}
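
mr_umem_get() is converted from returning an ERR_PTR-encoded pointer to returning an int and handing the umem back through an out-parameter, with PTR_ERR_OR_ZERO() collapsing the error check so callers simply propagate err. A minimal sketch of the same conversion, using toy local stand-ins for the kernel's error-pointer helpers:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Tiny local stand-ins for the kernel's ERR_PTR/PTR_ERR_OR_ZERO helpers,
     * just to show the "int return + out-parameter" shape used above. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR_OR_ZERO(const void *p)
    {
        unsigned long v = (unsigned long)p;
        return v >= (unsigned long)-MAX_ERRNO ? (long)v : 0;
    }

    static void *alloc_buf(size_t len)
    {
        void *p = malloc(len);
        return p ? p : ERR_PTR(-ENOMEM);
    }

    /* After the conversion: status comes back as an int, the object through
     * an out-parameter, so the caller can just return err on failure. */
    static int buf_get(size_t len, void **out)
    {
        *out = alloc_buf(len);
        return (int)PTR_ERR_OR_ZERO(*out);
    }

    int main(void)
    {
        void *buf;
        int err = buf_get(64, &buf);

        if (err < 0)
            return 1;
        printf("got buffer %p\n", buf);
        free(buf);
        return 0;
    }
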
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d1e921816bfe..a1b3125f0a6e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -77,12 +77,14 @@ struct mlx5_wqe_eth_pad {
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+ MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};
struct mlx5_modify_raw_qp_param {
u16 operation;
u32 set_mask; /* raw_qp_set_mask_map */
+ u32 rate_limit;
u8 rq_q_ctr_id;
};
@@ -351,6 +353,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+ int max_sge;
+
+ if (attr->qp_type == IB_QPT_RC)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else if (attr->qp_type == IB_QPT_XRC_INI)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_xrc_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else
+ max_sge = (wqe_size - sq_overhead(attr)) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
+ sizeof(struct mlx5_wqe_data_seg));
+}
+
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
@@ -381,13 +406,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
- mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+ mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+ attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
- qp->sq.max_gs = attr->cap.max_send_sge;
+ qp->sq.max_gs = get_send_sge(attr, wqe_size);
+ if (qp->sq.max_gs < attr->cap.max_send_sge)
+ return -ENOMEM;
+
+ attr->cap.max_send_sge = qp->sq.max_gs;
qp->sq.max_post = wq_size / wqe_size;
attr->cap.max_send_wr = qp->sq.max_post;
@@ -647,7 +677,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
return PTR_ERR(*umem);
}
- mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+ mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
if (err) {
@@ -700,7 +730,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+ mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
&rwq->rq_page_offset);
@@ -2442,8 +2472,14 @@ out:
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, int new_state)
+ struct mlx5_ib_sq *sq,
+ int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
+ struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+ u32 old_rate = ibqp->rate_limit;
+ u32 new_rate = old_rate;
+ u16 rl_index = 0;
void *in;
void *sqc;
int inlen;
@@ -2459,10 +2495,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(sqc, sqc, state, new_state);
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+ if (new_state != MLX5_SQC_STATE_RDY)
+ pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+ __func__);
+ else
+ new_rate = raw_qp_param->rate_limit;
+ }
+
+ if (old_rate != new_rate) {
+ if (new_rate) {
+ err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+ if (err) {
+ pr_err("Failed configuring rate %u: %d\n",
+ new_rate, err);
+ goto out;
+ }
+ }
+
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
+
err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
- if (err)
+ if (err) {
+ /* Remove new rate from table if failed */
+ if (new_rate &&
+ old_rate != new_rate)
+ mlx5_rl_remove_rate(dev, new_rate);
goto out;
+ }
+
+ /* Only remove the old rate after new rate was set */
+ if ((old_rate &&
+ (old_rate != new_rate)) ||
+ (new_state != MLX5_SQC_STATE_RDY))
+ mlx5_rl_remove_rate(dev, old_rate);
+ ibqp->rate_limit = new_rate;
sq->state = new_state;
out:
@@ -2477,6 +2547,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ int modify_rq = !!qp->rq.wqe_cnt;
+ int modify_sq = !!qp->sq.wqe_cnt;
int rq_state;
int sq_state;
int err;
@@ -2494,10 +2566,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq_state = MLX5_RQC_STATE_RST;
sq_state = MLX5_SQC_STATE_RST;
break;
- case MLX5_CMD_OP_INIT2INIT_QP:
- case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
+ if (raw_qp_param->set_mask ==
+ MLX5_RAW_QP_RATE_LIMIT) {
+ modify_rq = 0;
+ sq_state = sq->state;
+ } else {
+ return raw_qp_param->set_mask ? -EINVAL : 0;
+ }
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
if (raw_qp_param->set_mask)
return -EINVAL;
else
@@ -2507,13 +2587,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EINVAL;
}
- if (qp->rq.wqe_cnt) {
- err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+ if (modify_rq) {
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
- if (qp->sq.wqe_cnt) {
+ if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
tx_affinity);
@@ -2521,7 +2601,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+ return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
}
return 0;
@@ -2577,7 +2657,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
- int sqd_event;
int mlx5_st;
int err;
u16 op;
@@ -2724,12 +2803,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
- if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
- attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
- sqd_event = 1;
- else
- sqd_event = 0;
-
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
@@ -2776,6 +2849,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
}
+
+ if (attr_mask & IB_QP_RATE_LIMIT) {
+ raw_qp_param.rate_limit = attr->rate_limit;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+ }
+
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
@@ -3067,10 +3146,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
memset(umr, 0, sizeof(*umr));
umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = 1 << 7;
+ umr->flags = MLX5_UMR_INLINE;
}
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
{
u64 result;
@@ -3083,9 +3162,11 @@ static __be64 get_umr_reg_mr_mask(void)
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
@@ -3146,7 +3227,7 @@ static __be64 get_umr_update_pd_mask(void)
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct ib_send_wr *wr)
+ struct ib_send_wr *wr, int atomic)
{
struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -3171,7 +3252,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
umr->mkey_mask |= get_umr_update_pd_mask();
if (!umr->mkey_mask)
- umr->mkey_mask = get_umr_reg_mr_mask();
+ umr->mkey_mask = get_umr_reg_mr_mask(atomic);
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
}
@@ -4024,7 +4105,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
- set_reg_umr_segment(seg, wr);
+ set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((seg == qend)))
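
The packet-pacing hunks above add the new rate to the rate table before the SQ is modified, drop that new entry again if the modify fails, and release the old rate only once the new one has taken effect (or when the SQ leaves RDY). A standalone sketch of that reference-swap ordering with a toy rate table (names are illustrative, not mlx5_rl_* themselves):

    #include <stdio.h>

    static int rate_table[8];       /* refcount per rate slot */

    static int rate_add(int idx)     { rate_table[idx]++; return 0; }
    static void rate_remove(int idx) { if (rate_table[idx]) rate_table[idx]--; }
    static int apply_to_hw(int idx)  { (void)idx; return 0; /* pretend success */ }

    static int swap_rate(int *cur, int new_rate)
    {
        int err;

        if (new_rate == *cur)
            return 0;

        if (new_rate) {
            err = rate_add(new_rate);      /* take the new reference first */
            if (err)
                return err;
        }

        err = apply_to_hw(new_rate);
        if (err) {
            if (new_rate)
                rate_remove(new_rate);     /* roll back on failure */
            return err;
        }

        if (*cur)
            rate_remove(*cur);             /* old rate dropped only now */
        *cur = new_rate;
        return 0;
    }

    int main(void)
    {
        int cur = 0;

        swap_rate(&cur, 3);
        swap_rate(&cur, 5);
        printf("refs: rate3=%d rate5=%d\n", rate_table[3], rate_table[5]);
        return 0;
    }
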
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3857dbd9c956..6f4397ee1ed6 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return err;
}
- mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+ mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
&page_shift, &ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
&offset);
@@ -203,8 +203,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
- mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
- (unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
@@ -282,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
+ in.type = init_attr->srq_type;
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size);
@@ -294,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- in.type = init_attr->srq_type;
in.log_size = ilog2(srq->msrq.max);
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index bcac294042f5..c9f0f364f484 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,
on_hca_fail:
if (ah->type == MTHCA_AH_PCI_POOL) {
- ah->av = pci_pool_alloc(dev->av_table.pool,
- GFP_ATOMIC, &ah->avdma);
+ ah->av = pci_pool_zalloc(dev->av_table.pool,
+ GFP_ATOMIC, &ah->avdma);
if (!ah->av)
return -ENOMEM;
@@ -196,8 +196,6 @@ on_hca_fail:
ah->key = pd->ntmr.ibmr.lkey;
- memset(av, 0, MTHCA_AV_SIZE);
-
av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
av->g_slid = ah_attr->src_path_bits;
av->dlid = cpu_to_be16(ah_attr->dlid);
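
pci_pool_zalloc() replaces the pci_pool_alloc()-plus-memset() pair above, so the AV buffer is never visible in an uninitialized state and the explicit clear goes away. The same shape in plain C, with calloc standing in for the DMA pool allocator:

    #include <stdlib.h>
    #include <string.h>

    struct av { unsigned int port_pd, g_slid, dlid; };

    /* Before: allocate, then clear by hand. */
    static struct av *av_alloc_then_clear(void)
    {
        struct av *av = malloc(sizeof(*av));

        if (av)
            memset(av, 0, sizeof(*av));
        return av;
    }

    /* After: the allocator hands back zeroed memory, as pci_pool_zalloc()
     * does in the hunk above (calloc stands in for the DMA pool here). */
    static struct av *av_zalloc(void)
    {
        return calloc(1, sizeof(struct av));
    }

    int main(void)
    {
        struct av *a = av_alloc_then_clear();
        struct av *b = av_zalloc();

        free(a);
        free(b);
        return 0;
    }
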
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 358930a41e36..d31708742ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd)
}
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
int err;
struct mthca_ah *ah;
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 6727af27c017..2a6979e4ae1c 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -96,8 +96,6 @@ int mthca_reset(struct mthca_dev *mdev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
goto put_dev;
}
@@ -119,8 +117,6 @@ int mthca_reset(struct mthca_dev *mdev)
bridge_header = kmalloc(256, GFP_KERNEL);
if (!bridge_header) {
err = -ENOMEM;
- mthca_err(mdev, "Couldn't allocate memory to save HCA "
- "bridge PCI header, aborting.\n");
goto free_hca;
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 2baa45a8e401..5b9601014f0c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -515,7 +515,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Allocate hardware structure */
nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
if (!nesdev) {
- printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
ret = -ENOMEM;
goto bail2;
}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 57db9b332f44..8e703479e7ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2282,10 +2282,8 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
if (!listener) {
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
- if (!listener) {
- nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
+ if (!listener)
return NULL;
- }
listener->loc_addr = cm_info->loc_addr;
listener->loc_port = cm_info->loc_port;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index a1c6481d8038..19acd13c6cb1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -351,9 +351,8 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
/* allocate a new adapter struct */
nesadapter = kzalloc(adapter_size, GFP_KERNEL);
- if (nesadapter == NULL) {
+ if (!nesadapter)
return NULL;
- }
nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
@@ -1007,8 +1006,7 @@ int nes_init_cqp(struct nes_device *nesdev)
/* Allocate a twice the number of CQP requests as the SQ size */
nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
- if (nesdev->nes_cqp_requests == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
+ if (!nesdev->nes_cqp_requests) {
pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
nesdev->cqp.sq_pbase);
return -ENOMEM;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 416645259b0f..33624f17c347 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -320,8 +320,7 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
/* Found one */
fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
- if (fpdu_info == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
+ if (!fpdu_info) {
rc = -ENOMEM;
goto out;
}
@@ -729,8 +728,7 @@ static int nes_change_quad_hash(struct nes_device *nesdev,
}
qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
- if (qh_chg == NULL) {
- nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
+ if (!qh_chg) {
ret = -ENOMEM;
goto chg_qh_err;
}
@@ -880,10 +878,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
/* Allocate space the all mgt QPs once */
mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
- if (mgtvnic == NULL) {
- nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
+ if (!mgtvnic)
return -ENOMEM;
- }
/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
/* We are not sending from this NIC so sq is not allocated */
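
A pattern repeated across the nes, ocrdma and qib hunks: the per-call "allocation failed" messages are dropped because the kernel allocators already warn (with a backtrace) on failure unless __GFP_NOWARN is passed, so the failure path collapses to a bare error return. A minimal sketch of the preferred shape (userspace calloc standing in for kzalloc):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mgt_vnic { int qp_count; };

    /* The allocator reports the failure itself, so the error path carries
     * no extra message; just return -ENOMEM. */
    static int init_mgt(struct mgt_vnic **out, int count)
    {
        struct mgt_vnic *v = calloc(count, sizeof(*v));

        if (!v)
            return -ENOMEM;
        *out = v;
        return 0;
    }

    int main(void)
    {
        struct mgt_vnic *vnics;
        int err = init_mgt(&vnics, 8);

        if (err)
            return 1;
        printf("allocated %d vnic slots\n", 8);
        free(vnics);
        return 0;
    }
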
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 7f8597d6738b..5921ea3d50ae 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -662,10 +662,14 @@ tso_sq_no_longer_full:
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
- nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ nesvnic->tx_sw_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ nesvnic->linearized_skbs++;
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
if (!nes_nic_send(skb, netdev))
@@ -1461,7 +1465,8 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
/**
* nes_netdev_get_settings
*/
-static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int nes_netdev_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1470,54 +1475,59 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
u8 phy_type = nesadapter->phy_type[mac_index];
u8 phy_index = nesadapter->phy_index[mac_index];
u16 phy_data;
+ u32 supported, advertising;
- et_cmd->duplex = DUPLEX_FULL;
- et_cmd->port = PORT_MII;
- et_cmd->maxtxpkt = 511;
- et_cmd->maxrxpkt = 511;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
if (nesadapter->OneG_Mode) {
- ethtool_cmd_speed_set(et_cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
- et_cmd->supported = SUPPORTED_1000baseT_Full;
- et_cmd->advertising = ADVERTISED_1000baseT_Full;
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->phy_address = mac_index;
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.phy_address = mac_index;
} else {
unsigned long flags;
- et_cmd->supported = SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg;
- et_cmd->advertising = ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg;
+
+ supported = SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg;
+ advertising = ADVERTISED_1000baseT_Full
+ | ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
- et_cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- et_cmd->autoneg = AUTONEG_DISABLE;
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->phy_address = phy_index;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.phy_address = phy_index;
}
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D) ||
(phy_type == NES_PHY_TYPE_KR)) {
- et_cmd->transceiver = XCVR_EXTERNAL;
- et_cmd->port = PORT_FIBRE;
- et_cmd->supported = SUPPORTED_FIBRE;
- et_cmd->advertising = ADVERTISED_FIBRE;
- et_cmd->phy_address = phy_index;
+ cmd->base.port = PORT_FIBRE;
+ supported = SUPPORTED_FIBRE;
+ advertising = ADVERTISED_FIBRE;
+ cmd->base.phy_address = phy_index;
} else {
- et_cmd->transceiver = XCVR_INTERNAL;
- et_cmd->supported = SUPPORTED_10000baseT_Full;
- et_cmd->advertising = ADVERTISED_10000baseT_Full;
- et_cmd->phy_address = mac_index;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ cmd->base.phy_address = mac_index;
}
- ethtool_cmd_speed_set(et_cmd, SPEED_10000);
- et_cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -1525,7 +1535,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
/**
* nes_netdev_set_settings
*/
-static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int
+nes_netdev_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -1539,7 +1551,7 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (et_cmd->autoneg) {
+ if (cmd->base.autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
@@ -1556,8 +1568,6 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
static const struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = nes_netdev_get_settings,
- .set_settings = nes_netdev_set_settings,
.get_strings = nes_netdev_get_strings,
.get_sset_count = nes_netdev_get_sset_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
@@ -1566,6 +1576,8 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
+ .get_link_ksettings = nes_netdev_get_link_ksettings,
+ .set_link_ksettings = nes_netdev_set_link_ksettings,
};
static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
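
nes_nic.c moves from the legacy get_settings/set_settings ethtool ops to get_link_ksettings/set_link_ksettings: speed, duplex, port and autoneg land in cmd->base, while the u32 supported/advertising masks are expanded into the link_modes bitmaps via ethtool_convert_legacy_u32_to_link_mode(); the skb_linearize() return value is also checked now, dropping the packet instead of transmitting a possibly unlinearized skb. A toy version of the u32-to-bitmap expansion (not the ethtool API itself):

    #include <stdint.h>
    #include <stdio.h>

    #define LINK_MODE_BITS  96
    #define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
    #define LINK_MODE_LONGS ((LINK_MODE_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Toy equivalent of expanding a legacy u32 advertise/support mask into a
     * wider link-mode bitmap, as ethtool_convert_legacy_u32_to_link_mode()
     * does for cmd->link_modes.* in the hunk above. */
    static void legacy_u32_to_bitmap(unsigned long dst[LINK_MODE_LONGS],
                                     uint32_t legacy)
    {
        for (int i = 0; i < LINK_MODE_LONGS; i++)
            dst[i] = 0;
        dst[0] = legacy;    /* the legacy bits all live in the low word */
    }

    int main(void)
    {
        unsigned long supported[LINK_MODE_LONGS];

        legacy_u32_to_bitmap(supported, 1u << 5);
        printf("%#lx\n", supported[0]);
        return 0;
    }
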
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index bd69125731c1..aff9fb14768b 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
/**
* nes_create_ah
*/
-static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
return ERR_PTR(-ENOSYS);
}
@@ -1075,7 +1076,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
if (!mem) {
nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
- nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
return ERR_PTR(-ENOMEM);
}
u64nesqp = (unsigned long)mem;
@@ -1475,7 +1475,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
if (!nescq) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
- nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
return ERR_PTR(-ENOMEM);
}
@@ -2408,7 +2407,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
- nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
ib_umem_release(region);
return ERR_PTR(-ENOMEM);
}
@@ -2416,7 +2414,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!nesmr) {
ib_umem_release(region);
kfree(nespbl);
- nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
return ERR_PTR(-ENOMEM);
}
nesmr->region = region;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 797362a297b2..14d33b0f3950 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
{
u32 *ahid_addr;
int status;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 3856dd4c7e3d..0704a24b17c8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -50,7 +50,9 @@ enum {
OCRDMA_AH_L3_TYPE_MASK = 0x03,
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *,
+ struct ib_udata *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 67fc0b6857e1..9a305201545e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1596,10 +1596,9 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
GFP_KERNEL);
- if (!dev->pd_mgr) {
- pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+ if (!dev->pd_mgr)
return;
- }
+
status = ocrdma_mbx_alloc_pd_range(dev);
if (status) {
pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
@@ -1642,7 +1641,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
int i;
- int status = 0;
+ int status = -ENOMEM;
int max_ah;
struct ocrdma_create_ah_tbl *cmd;
struct ocrdma_create_ah_tbl_rsp *rsp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 8bef09a8c49f..f8e4b0a6486f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -84,10 +84,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
- if (!mem->debugfs_mem) {
- pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+ if (!mem->debugfs_mem)
return false;
- }
return true;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a61514296767..57c8de208077 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -511,8 +511,10 @@ int qedr_dealloc_pd(struct ib_pd *ibpd)
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
- if (!pd)
+ if (!pd) {
pr_err("Invalid PD received in dealloc_pd\n");
+ return -EINVAL;
+ }
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
@@ -888,6 +890,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
pbl_ptr = cq->q.pbl_tbl->pa;
page_cnt = cq->q.pbl_info.num_pbes;
+
+ cq->ibcq.cqe = chain_entries;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -903,6 +907,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ cq->ibcq.cqe = cq->pbl.capacity;
}
qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
@@ -980,8 +985,13 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
/* GSIs CQs are handled by driver, so they don't exist in the FW */
if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ int rc;
+
iparams.icid = cq->icid;
- dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+ &oparams);
+ if (rc)
+ return rc;
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
}
@@ -1477,6 +1487,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
struct qedr_qp *qp;
+ struct ib_qp *ibqp;
int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
@@ -1486,13 +1497,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
if (rc)
return ERR_PTR(rc);
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
- if (attrs->srq)
- return ERR_PTR(-EINVAL);
-
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
get_qedr_cq(attrs->send_cq),
@@ -1508,7 +1519,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
"create qp: unexpected udata when creating GSI QP\n");
goto err0;
}
- return qedr_create_gsi_qp(dev, attrs, qp);
+ ibqp = qedr_create_gsi_qp(dev, attrs, qp);
+ if (IS_ERR(ibqp))
+ kfree(qp);
+ return ibqp;
}
memset(&in_params, 0, sizeof(in_params));
@@ -1960,7 +1974,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_STATE) {
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
- qedr_update_qp_state(dev, qp, qp_params.new_state);
+ rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
qp->state = qp_params.new_state;
}
@@ -2064,8 +2078,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
- if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
- QED_ROCE_QP_STATE_INIT)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
@@ -2094,7 +2110,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
return rc;
}
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
{
struct qedr_ah *ah;
@@ -2413,8 +2430,7 @@ static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
*/
pbl = list_first_entry(&info->inuse_pbl_list,
struct qedr_pbl, list_entry);
- list_del(&pbl->list_entry);
- list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ list_move_tail(&pbl->list_entry, &info->free_pbl_list);
info->completed_handled++;
}
}
@@ -2620,7 +2636,9 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
- if (wr->send_flags & IB_SEND_INLINE) {
+ if (wr->send_flags & IB_SEND_INLINE &&
+ (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE)) {
u8 flags = 0;
SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
@@ -2971,8 +2989,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -2981,11 +3000,6 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return -EINVAL;
}
- if (!wr) {
- DP_ERR(dev, "Got an empty post send.\n");
- return -EINVAL;
- }
-
while (wr) {
rc = __qedr_post_send(ibqp, wr, bad_wr);
if (rc)
@@ -3030,8 +3044,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
- (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ if (qp->state == QED_ROCE_QP_STATE_RESET) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
return -EINVAL;
@@ -3173,6 +3186,7 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = status;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
@@ -3224,7 +3238,7 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
"Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
- IB_WC_WR_FLUSH_ERR, 0);
+ IB_WC_WR_FLUSH_ERR, 1);
break;
default:
/* process all WQE before the consumer */
@@ -3362,6 +3376,7 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
/* fill WC */
wc->status = wc_status;
+ wc->vendor_err = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
wc->wr_id = wr_id;
@@ -3390,6 +3405,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
while (num_entries && qp->rq.wqe_cons != hw_cons) {
/* fill WC */
wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
wc->wc_flags = 0;
wc->src_qp = qp->id;
wc->byte_len = 0;
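
Two qedr state checks are fixed above: qp->state != (RESET | ERR | INIT) compares against the bitwise OR of three enum values, i.e. a single number, so it never tested what it meant to; the replacement spells out one != comparison per state (the post_send and post_recv checks get the same treatment). A tiny standalone demonstration of the difference, with illustrative enum values:

    #include <stdio.h>

    enum qp_state { STATE_RESET = 0, STATE_INIT = 1, STATE_RTR = 2, STATE_ERR = 5 };

    int main(void)
    {
        enum qp_state state = STATE_RESET;

        /* Buggy form: ORs the constants into 0|1|5 == 5, so this really
         * asks "state != 5" and wrongly passes for RESET and INIT. */
        int buggy = (state != (STATE_RESET | STATE_ERR | STATE_INIT));

        /* Fixed form from the hunk above: each state checked explicitly. */
        int fixed = (state != STATE_RESET) &&
                    (state != STATE_ERR) &&
                    (state != STATE_INIT);

        printf("buggy=%d fixed=%d\n", buggy, fixed);    /* buggy=1 fixed=0 */
        return 0;
    }
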
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index a9b5e67bb81e..070677ca4d19 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);
int qedr_dereg_mr(struct ib_mr *);
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 8c34b23e5bf6..775018b32b0d 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -609,8 +609,6 @@ static ssize_t qib_diagpkt_write(struct file *fp,
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
- qib_devinfo(dd->pcidev,
- "Unable to allocate tmp buffer, failing\n");
ret = -ENOMEM;
goto bail;
}
@@ -702,10 +700,8 @@ int qib_register_observer(struct qib_devdata *dd,
if (!dd || !op)
return -EINVAL;
olp = vmalloc(sizeof(*olp));
- if (!olp) {
- pr_err("vmalloc for observer failed\n");
+ if (!olp)
return -ENOMEM;
- }
spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
olp->op = op;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 728e0a030d2e..2b5982f743ef 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -420,8 +420,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
if (list_empty(&qp->rspwait)) {
qp->r_flags |=
RVT_R_RSP_NAK;
- atomic_inc(
- &qp->refcount);
+ rvt_get_qp(qp);
list_add_tail(
&qp->rspwait,
&rcd->qp_wait_list);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 311ee6c3dd5e..33a2e74c8495 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -182,12 +182,8 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
* */
len = sizeof(struct qib_flash);
buf = vmalloc(len);
- if (!buf) {
- qib_dev_err(dd,
- "Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
- len);
+ if (!buf)
goto bail;
- }
/*
* Use "public" eeprom read function, which does locking and
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 382466a90da7..2d1eacf1dfed 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2066,8 +2066,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ if (!ib_safe_file_access(fp)) {
+ pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
return -EACCES;
+ }
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
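
qib_write() swaps the WARN_ON_ONCE() on the security-context check for a one-time pr_err_once() that names the offending pid and comm, then still fails the write with -EACCES; the information is kept without a full WARN splat. A toy "log once" guard in the same spirit:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for pr_err_once(): the message is emitted on the first
     * rejected access only; later callers just get the error code. */
    static int check_access(bool safe, int pid, const char *comm)
    {
        static bool warned;

        if (safe)
            return 0;
        if (!warned) {
            warned = true;
            fprintf(stderr,
                "write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                pid, comm);
        }
        return -EACCES;
    }

    int main(void)
    {
        check_access(false, 1234, "badproc");
        check_access(false, 1234, "badproc");   /* silent the second time */
        return 0;
    }
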
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a3733f25280f..92399d3ffd15 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1759,9 +1759,7 @@ static void pe_boardname(struct qib_devdata *dd)
}
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
@@ -2533,8 +2531,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr6120names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr6120names; s; i++)
s = strchr(s + 1, '\n');
@@ -2542,8 +2538,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
}
static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 00b2af211157..e55e31a69195 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2070,9 +2070,7 @@ static void qib_7220_boardname(struct qib_devdata *dd)
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
@@ -3179,8 +3177,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr7220names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr7220names; s; i++)
s = strchr(s + 1, '\n');
@@ -3188,8 +3184,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for portcounters\n");
}
static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ded27172320e..c4a3616062f1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3627,9 +3627,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
namelen = strlen(n) + 1;
dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (!dd->boardname)
- qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
- else
+ if (dd->boardname)
snprintf(dd->boardname, namelen, "%s", n);
snprintf(dd->boardversion, sizeof(dd->boardversion),
@@ -3656,7 +3654,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
static int qib_do_7322_reset(struct qib_devdata *dd)
{
u64 val;
- u64 *msix_vecsave;
+ u64 *msix_vecsave = NULL;
int i, msix_entries, ret = 1;
u16 cmdval;
u8 int_line, clinesz;
@@ -3677,10 +3675,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
/* can be up to 512 bytes, too big for stack */
msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
sizeof(u64), GFP_KERNEL);
- if (!msix_vecsave)
- qib_dev_err(dd, "No mem to save MSIx data\n");
- } else
- msix_vecsave = NULL;
+ }
/*
* Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
@@ -5043,8 +5038,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
dd->cspec->cntrnamelen = 1 + s - cntr7322names;
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->cspec->cntrs)
- qib_dev_err(dd, "Failed allocation for counters\n");
for (i = 0, s = (char *)portcntr7322names; s; i++)
s = strchr(s + 1, '\n');
@@ -5053,9 +5046,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
for (i = 0; i < dd->num_pports; ++i) {
dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
- if (!dd->pport[i].cpspec->portcntrs)
- qib_dev_err(dd,
- "Failed allocation for portcounters\n");
}
}
@@ -6461,7 +6451,6 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
- qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
ret = -ENOMEM;
goto bail;
}
@@ -7338,10 +7327,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
tabsize = actual_cnt;
dd->cspec->msix_entries = kzalloc(tabsize *
sizeof(struct qib_msix_entry), GFP_KERNEL);
- if (!dd->cspec->msix_entries) {
- qib_dev_err(dd, "No memory for MSIx table\n");
+ if (!dd->cspec->msix_entries)
tabsize = 0;
- }
+
for (i = 0; i < tabsize; i++)
dd->cspec->msix_entries[i].msix.entry = i;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 1730aa839a47..b50240b1d5a4 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -133,11 +133,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
* cleanup iterates across all possible ctxts.
*/
dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
- if (!dd->rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata array, failing\n");
+ if (!dd->rcd)
return -ENOMEM;
- }
/* create (one or more) kctxt */
for (i = 0; i < dd->first_user_ctxt; ++i) {
@@ -265,39 +262,23 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
* IB_CCT_ENTRIES;
ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion control table for port %d!\n",
- port);
+ if (!ppd->ccti_entries)
goto bail;
- }
size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries) {
- qib_dev_err(dd,
- "failed to allocate congestion setting list for port %d!\n",
- port);
+ if (!ppd->congestion_entries)
goto bail_1;
- }
size = sizeof(struct cc_table_shadow);
ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow ccti list for port %d!\n",
- port);
+ if (!ppd->ccti_entries_shadow)
goto bail_2;
- }
size = sizeof(struct ib_cc_congestion_setting_attr);
ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries_shadow) {
- qib_dev_err(dd,
- "failed to allocate shadow congestion setting list for port %d!\n",
- port);
+ if (!ppd->congestion_entries_shadow)
goto bail_3;
- }
return 0;
@@ -391,18 +372,12 @@ static void init_shadow_tids(struct qib_devdata *dd)
dma_addr_t *addrs;
pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
- if (!pages) {
- qib_dev_err(dd,
- "failed to allocate shadow page * array, no expected sends!\n");
+ if (!pages)
goto bail;
- }
addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
- if (!addrs) {
- qib_dev_err(dd,
- "failed to allocate shadow dma handle array, no expected sends!\n");
+ if (!addrs)
goto bail_free;
- }
dd->pageshadow = pages;
dd->physshadow = addrs;
@@ -1026,11 +1001,8 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
cnt = 1024;
addr = vmalloc(cnt);
- if (!addr) {
- qib_devinfo(dd->pcidev,
- "Couldn't get memory for checking PIO perf, skipping\n");
+ if (!addr)
goto done;
- }
preempt_disable(); /* we want reasonably accurate elapsed time */
msecs = 1 + jiffies_to_msecs(jiffies);
@@ -1172,9 +1144,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
sizeof(long), GFP_KERNEL);
if (qib_cpulist)
qib_cpulist_count = count;
- else
- qib_early_err(&pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
}
#ifdef CONFIG_DEBUG_FS
qib_dbg_ibdev_init(&dd->verbs_dev);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512e75aa..031433cb7206 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -941,8 +941,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
u32 opcode;
u32 psn;
@@ -988,22 +986,8 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
qp->s_last = s_last;
/* see post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_put_swqe(wqe);
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
}
/*
* If we were waiting for sends to complete before resending,
@@ -1032,9 +1016,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
struct rvt_swqe *wqe,
struct qib_ibport *ibp)
{
- struct ib_wc wc;
- unsigned i;
-
/*
* Don't decrement refcount and don't generate a
* completion if the SWQE is being resent until the send
@@ -1044,28 +1025,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
u32 s_last;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
s_last = qp->s_last;
if (++s_last >= qp->s_size)
s_last = 0;
qp->s_last = s_last;
/* see post_send() */
barrier();
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
+ rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
} else
this_cpu_inc(*ibp->rvp.rc_delayed_comp);
@@ -2112,8 +2079,7 @@ send_last:
* Update the next expected PSN. We add 1 later
* below, so only add the remainder here.
*/
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
+ qp->r_psn += rvt_div_mtu(qp, len - 1);
} else {
e->rdma_sge.mr = NULL;
e->rdma_sge.vaddr = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index de1bde5950f5..e54a2feeeb10 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -793,7 +793,6 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
- unsigned i;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
@@ -805,32 +804,13 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
qp->s_last = last;
/* See post_send() */
barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
+ rvt_put_swqe(wqe);
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
+ rvt_qp_swqe_complete(qp, wqe, status);
if (qp->s_acked == old_last)
qp->s_acked = last;
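For readers unfamiliar with rdmavt, the helpers adopted in the qib hunks above consolidate the open-coded send-completion path that each hunk removes. The sketch below is reconstructed only from those removed lines; the sketch_* name is illustrative, not a real rdmavt symbol, and the wr-to-wc opcode translation is omitted for brevity.

/* Sketch of what rvt_put_swqe() + rvt_qp_swqe_complete() replace above. */
static void sketch_complete_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 enum ib_wc_status status)
{
	unsigned int i;

	/* rvt_put_swqe(): drop the MR references held by the SWQE. */
	for (i = 0; i < wqe->wr.num_sge; i++)
		rvt_put_mr(wqe->sg_list[i].mr);

	/* rvt_qp_swqe_complete(): post a CQE if signaled or on error. */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc = {
			.wr_id = wqe->wr.wr_id,
			.status = status,
			.qp = &qp->ibqp,
			.byte_len = status == IB_WC_SUCCESS ? wqe->length : 0,
		};

		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}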
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 954f15064514..4b54c0ddd08a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -114,19 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
* System image GUID.
*/
__be64 ib_qib_sys_image_guid;
@@ -464,7 +451,7 @@ static void mem_timer(unsigned long data)
priv = list_entry(list->next, struct qib_qp_priv, iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
}
@@ -477,8 +464,7 @@ static void mem_timer(unsigned long data)
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
@@ -762,7 +748,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -772,8 +758,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
}
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
} else
spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}
@@ -808,7 +793,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
break;
avail -= qpp->s_tx->txreq.sg_count;
list_del_init(&qpp->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
qps[n++] = qp;
}
@@ -822,8 +807,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
qib_schedule_send(qp);
}
spin_unlock(&qp->s_lock);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
@@ -1288,7 +1272,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
priv = list_entry(list->next, struct qib_qp_priv, iowait);
qp = priv->owner;
list_del_init(&priv->iowait);
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
qps[n++] = qp;
}
dd->f_wantpiobuf_intr(dd, 0);
@@ -1306,8 +1290,7 @@ full:
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
}
}
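The refcount hunks in this file follow the same pattern: rvt_get_qp() and rvt_put_qp() stand in for the open-coded atomic_inc()/atomic_dec_and_test() pairs removed above. A minimal sketch, inferred solely from those removed lines (the real rdmavt definitions may do more):

/* Sketch of the refcounting the rvt_get_qp()/rvt_put_qp() calls replace. */
static inline void sketch_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

static inline void sketch_put_qp(struct rvt_qp *qp)
{
	/* Last reference: wake anyone blocked in qib_destroy_qp(). */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}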
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 5b0248adf4ce..092d4e11a633 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -117,10 +117,10 @@ static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -158,10 +158,10 @@ static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
res_chunk = get_qp_res_chunk(qp_grp);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get qp res with err %ld\n",
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
for (i = 0; i < res_chunk->cnt; i++) {
@@ -186,11 +186,11 @@ static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
struct usnic_vnic_res_chunk *res_chunk;
res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(res_chunk)) {
+ if (IS_ERR(res_chunk)) {
usnic_err("Unable to get %s with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
PTR_ERR(res_chunk));
- return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+ return PTR_ERR(res_chunk);
}
uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
@@ -228,8 +228,6 @@ create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_unreserve_port;
}
@@ -303,8 +301,6 @@ create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
if (IS_ERR_OR_NULL(flow)) {
- usnic_err("Unable to alloc flow failed with err %ld\n",
- PTR_ERR(flow));
err = flow ? PTR_ERR(flow) : -EFAULT;
goto out_put_sock;
}
@@ -694,18 +690,14 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
}
qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
- if (!qp_grp) {
- usnic_err("Unable to alloc qp_grp - Out of memory\n");
+ if (!qp_grp)
return NULL;
- }
qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
qp_grp);
if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
err = qp_grp->res_chunk_list ?
PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
- usnic_err("Unable to alloc res for %d with err %d\n",
- qp_grp->grp_id, err);
goto out_free_qp_grp;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a5bfbba6bbac..74819a7951e2 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -87,12 +87,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.bar_len = bar->len;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
@@ -101,12 +101,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.rq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
@@ -115,12 +115,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
resp.wq_idx[i] = chunk->res[i]->vnic_idx;
chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
- if (IS_ERR_OR_NULL(chunk)) {
+ if (IS_ERR(chunk)) {
usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
qp_grp->grp_id,
PTR_ERR(chunk));
- return chunk ? PTR_ERR(chunk) : -ENOMEM;
+ return PTR_ERR(chunk);
}
WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
@@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context,
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+
{
usnic_dbg("\n");
return ERR_PTR(-EPERM);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 0d9d2e6a14d5..0ed8e072329e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma);
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr);
+ struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
+
int usnic_ib_destroy_ah(struct ib_ah *ah);
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index 887510718690..e7b0030254da 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -241,17 +241,12 @@ usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
return ERR_PTR(-EINVAL);
ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
- if (!ret) {
- usnic_err("Failed to allocate chunk for %s - Out of memory\n",
- usnic_vnic_pci_name(vnic));
+ if (!ret)
return ERR_PTR(-ENOMEM);
- }
if (cnt > 0) {
ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC);
if (!ret->res) {
- usnic_err("Failed to allocate resources for %s. Out of memory\n",
- usnic_vnic_pci_name(vnic));
kfree(ret);
return ERR_PTR(-ENOMEM);
}
@@ -311,8 +306,10 @@ static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
struct usnic_vnic_res *res;
cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
- if (cnt < 1)
+ if (cnt < 1) {
+ usnic_err("Wrong res count with cnt %d\n", cnt);
return -EINVAL;
+ }
chunk->cnt = chunk->free_cnt = cnt;
chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
@@ -384,12 +381,8 @@ static int usnic_vnic_discover_resources(struct pci_dev *pdev,
res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
err = usnic_vnic_alloc_res_chunk(vnic, res_type,
&vnic->chunks[res_type]);
- if (err) {
- usnic_err("Failed to alloc res %s with err %d\n",
- usnic_vnic_res_type_to_str(res_type),
- err);
+ if (err)
goto out_clean_chunks;
- }
}
return 0;
@@ -454,11 +447,8 @@ struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
}
vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
- if (!vnic) {
- usnic_err("Failed to alloc vnic for %s - out of memory\n",
- pci_name(pdev));
+ if (!vnic)
return ERR_PTR(-ENOMEM);
- }
spin_lock_init(&vnic->res_lock);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
new file mode 100644
index 000000000000..5a9790ac0ede
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_VMWARE_PVRDMA
+ tristate "VMware Paravirtualized RDMA Driver"
+ depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3
+ ---help---
+	  This driver provides low-level support for the VMware Paravirtual
+ RDMA adapter. It interacts with the VMXNet3 driver to provide
+ Ethernet capabilities.
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
new file mode 100644
index 000000000000..0194ed19f542
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
+
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
new file mode 100644
index 000000000000..71e1d55d69d6
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_H__
+#define __PVRDMA_H__
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma_ring.h"
+#include "pvrdma_dev_api.h"
+#include "pvrdma_verbs.h"
+
+/* NOT the same as BIT_MASK(). */
+#define PVRDMA_MASK(n) ((n << 1) - 1)
+
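As the comment says, PVRDMA_MASK() is not BIT_MASK(): its argument is the highest flag value itself, not a bit number. A worked illustration, assuming the *_MAX enumerators it is used with name the highest valid flag:

/*
 * Hypothetical highest flag: EXAMPLE_FLAGS_MAX == BIT(3) == 0x8.
 *
 *   PVRDMA_MASK(0x8) == (0x8 << 1) - 1 == 0xf
 *
 * i.e. a mask covering every bit up to and including the highest flag, so
 * helpers such as ib_send_flags_to_pvrdma() below simply drop flag bits the
 * device does not define.
 */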
+/*
+ * VMware PVRDMA PCI device id.
+ */
+#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
+
+struct pvrdma_dev;
+
+struct pvrdma_page_dir {
+ dma_addr_t dir_dma;
+ u64 *dir;
+ int ntables;
+ u64 **tables;
+ u64 npages;
+ void **pages;
+};
+
+struct pvrdma_cq {
+ struct ib_cq ibcq;
+ int offset;
+ spinlock_t cq_lock; /* Poll lock. */
+ struct pvrdma_uar_map *uar;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring_state;
+ struct pvrdma_page_dir pdir;
+ u32 cq_handle;
+ bool is_kernel;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+struct pvrdma_id_table {
+ u32 last;
+ u32 top;
+ u32 max;
+ u32 mask;
+ spinlock_t lock; /* Table lock. */
+ unsigned long *table;
+};
+
+struct pvrdma_uar_map {
+ unsigned long pfn;
+ void __iomem *map;
+ int index;
+};
+
+struct pvrdma_uar_table {
+ struct pvrdma_id_table tbl;
+ int size;
+};
+
+struct pvrdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct pvrdma_dev *dev;
+ struct pvrdma_uar_map uar;
+ u64 ctx_handle;
+};
+
+struct pvrdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u32 pd_handle;
+ int privileged;
+};
+
+struct pvrdma_mr {
+ u32 mr_handle;
+ u64 iova;
+ u64 size;
+};
+
+struct pvrdma_user_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct pvrdma_mr mmr;
+ struct pvrdma_page_dir pdir;
+ u64 *pages;
+ u32 npages;
+ u32 max_pages;
+ u32 page_shift;
+};
+
+struct pvrdma_wq {
+ struct pvrdma_ring *ring;
+ spinlock_t lock; /* Work queue lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_sg;
+ int offset;
+};
+
+struct pvrdma_ah {
+ struct ib_ah ibah;
+ struct pvrdma_av av;
+};
+
+struct pvrdma_qp {
+ struct ib_qp ibqp;
+ u32 qp_handle;
+ u32 qkey;
+ struct pvrdma_wq sq;
+ struct pvrdma_wq rq;
+ struct ib_umem *rumem;
+ struct ib_umem *sumem;
+ struct pvrdma_page_dir pdir;
+ int npages;
+ int npages_send;
+ int npages_recv;
+ u32 flags;
+ u8 port;
+ u8 state;
+ bool is_kernel;
+ struct mutex mutex; /* QP state mutex. */
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+struct pvrdma_dev {
+ /* PCI device-related information. */
+ struct ib_device ib_dev;
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
+ dma_addr_t dsrbase; /* Shared region base address */
+ void *cmd_slot;
+ void *resp_slot;
+ unsigned long flags;
+ struct list_head device_link;
+
+ /* Locking and interrupt information. */
+ spinlock_t cmd_lock; /* Command lock. */
+ struct semaphore cmd_sema;
+ struct completion cmd_done;
+ struct {
+ enum pvrdma_intr_type type; /* Intr type */
+ struct msix_entry msix_entry[PVRDMA_MAX_INTERRUPTS];
+ irq_handler_t handler[PVRDMA_MAX_INTERRUPTS];
+ u8 enabled[PVRDMA_MAX_INTERRUPTS];
+ u8 size;
+ } intr;
+
+ /* RDMA-related device information. */
+ union ib_gid *sgid_tbl;
+ struct pvrdma_ring_state *async_ring_state;
+ struct pvrdma_page_dir async_pdir;
+ struct pvrdma_ring_state *cq_ring_state;
+ struct pvrdma_page_dir cq_pdir;
+ struct pvrdma_cq **cq_tbl;
+ spinlock_t cq_tbl_lock;
+ struct pvrdma_qp **qp_tbl;
+ spinlock_t qp_tbl_lock;
+ struct pvrdma_uar_table uar_table;
+ struct pvrdma_uar_map driver_uar;
+ __be64 sys_image_guid;
+ spinlock_t desc_lock; /* Device modification lock. */
+ u32 port_cap_mask;
+ struct mutex port_mutex; /* Port modification mutex. */
+ bool ib_active;
+ atomic_t num_qps;
+ atomic_t num_cqs;
+ atomic_t num_pds;
+ atomic_t num_ahs;
+
+ /* Network device information. */
+ struct net_device *netdev;
+ struct notifier_block nb_netdev;
+};
+
+struct pvrdma_netdevice_work {
+ struct work_struct work;
+ struct net_device *event_netdev;
+ unsigned long event;
+};
+
+static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct pvrdma_dev, ib_dev);
+}
+
+static inline struct
+pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
+}
+
+static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct pvrdma_pd, ibpd);
+}
+
+static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct pvrdma_cq, ibcq);
+}
+
+static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct pvrdma_user_mr, ibmr);
+}
+
+static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct pvrdma_qp, ibqp);
+}
+
+static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct pvrdma_ah, ibah);
+}
+
+static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
+{
+ writel(cpu_to_le32(val), dev->regs + reg);
+}
+
+static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
+{
+ return le32_to_cpu(readl(dev->regs + reg));
+}
+
+static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
+}
+
+static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
+{
+ writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
+}
+
+static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
+ u64 offset)
+{
+ return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
+}
+
+static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
+{
+ return (enum pvrdma_mtu)mtu;
+}
+
+static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
+{
+ return (enum ib_mtu)mtu;
+}
+
+static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
+ enum ib_port_state state)
+{
+ return (enum pvrdma_port_state)state;
+}
+
+static inline enum ib_port_state pvrdma_port_state_to_ib(
+ enum pvrdma_port_state state)
+{
+ return (enum ib_port_state)state;
+}
+
+static inline int ib_port_cap_flags_to_pvrdma(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
+}
+
+static inline int pvrdma_port_cap_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
+ enum ib_port_width width)
+{
+ return (enum pvrdma_port_width)width;
+}
+
+static inline enum ib_port_width pvrdma_port_width_to_ib(
+ enum pvrdma_port_width width)
+{
+ return (enum ib_port_width)width;
+}
+
+static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
+ enum ib_port_speed speed)
+{
+ return (enum pvrdma_port_speed)speed;
+}
+
+static inline enum ib_port_speed pvrdma_port_speed_to_ib(
+ enum pvrdma_port_speed speed)
+{
+ return (enum ib_port_speed)speed;
+}
+
+static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
+{
+ return attr_mask;
+}
+
+static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
+{
+ return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
+}
+
+static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
+ enum ib_mig_state state)
+{
+ return (enum pvrdma_mig_state)state;
+}
+
+static inline enum ib_mig_state pvrdma_mig_state_to_ib(
+ enum pvrdma_mig_state state)
+{
+ return (enum ib_mig_state)state;
+}
+
+static inline int ib_access_flags_to_pvrdma(int flags)
+{
+ return flags;
+}
+
+static inline int pvrdma_access_flags_to_ib(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
+}
+
+static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
+{
+ return (enum pvrdma_qp_type)type;
+}
+
+static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
+{
+ return (enum ib_qp_type)type;
+}
+
+static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
+{
+ return (enum pvrdma_qp_state)state;
+}
+
+static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
+{
+ return (enum ib_qp_state)state;
+}
+
+static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
+{
+ return (enum pvrdma_wr_opcode)op;
+}
+
+static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+ enum pvrdma_wc_status status)
+{
+ return (enum ib_wc_status)status;
+}
+
+static inline int pvrdma_wc_opcode_to_ib(int opcode)
+{
+ return opcode;
+}
+
+static inline int pvrdma_wc_flags_to_ib(int flags)
+{
+ return flags;
+}
+
+static inline int ib_send_flags_to_pvrdma(int flags)
+{
+ return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+ const struct pvrdma_qp_cap *src);
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+ const struct ib_qp_cap *src);
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src);
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src);
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+ const struct pvrdma_ah_attr *src);
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct ib_ah_attr *src);
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev);
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages);
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir);
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr);
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset);
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list, int num_pages);
+
+int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *rsp, unsigned resp_code);
+
+#endif /* __PVRDMA_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
new file mode 100644
index 000000000000..4a78c537d8a1
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+
+#include "pvrdma.h"
+
+#define PVRDMA_CMD_TIMEOUT 10000 /* ms */
+
+static inline int pvrdma_cmd_recv(struct pvrdma_dev *dev,
+ union pvrdma_cmd_resp *resp,
+ unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "receive response from device\n");
+
+ err = wait_for_completion_interruptible_timeout(&dev->cmd_done,
+ msecs_to_jiffies(PVRDMA_CMD_TIMEOUT));
+ if (err == 0 || err == -ERESTARTSYS) {
+ dev_warn(&dev->pdev->dev,
+ "completion timeout or interrupted\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(resp, dev->resp_slot, sizeof(*resp));
+ spin_unlock(&dev->cmd_lock);
+
+ if (resp->hdr.ack != resp_code) {
+ dev_warn(&dev->pdev->dev,
+ "unknown response %#x expected %#x\n",
+ resp->hdr.ack, resp_code);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+ union pvrdma_cmd_resp *resp, unsigned resp_code)
+{
+ int err;
+
+ dev_dbg(&dev->pdev->dev, "post request to device\n");
+
+	/* Serialization */
+ down(&dev->cmd_sema);
+
+ BUILD_BUG_ON(sizeof(union pvrdma_cmd_req) !=
+ sizeof(struct pvrdma_cmd_modify_qp));
+
+ spin_lock(&dev->cmd_lock);
+ memcpy(dev->cmd_slot, req, sizeof(*req));
+ spin_unlock(&dev->cmd_lock);
+
+ init_completion(&dev->cmd_done);
+ pvrdma_write_reg(dev, PVRDMA_REG_REQUEST, 0);
+
+ /* Make sure the request is written before reading status. */
+ mb();
+
+ err = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (err == 0) {
+ if (resp != NULL)
+ err = pvrdma_cmd_recv(dev, resp, resp_code);
+ } else {
+ dev_warn(&dev->pdev->dev,
+ "failed to write request error reg: %d\n", err);
+ err = -EFAULT;
+ }
+
+ up(&dev->cmd_sema);
+
+ return err;
+}
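A usage sketch for the request/response slot protocol above, mirroring how the verbs code added later in this series issues a response-less command (compare pvrdma_destroy_cq() in pvrdma_cq.c); the wrapper name here is illustrative only:

/* Sketch: post a command that expects no typed response. */
static int example_destroy_cq_cmd(struct pvrdma_dev *dev, u32 cq_handle)
{
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
	cmd->cq_handle = cq_handle;

	/* NULL resp: only the error register result is reported back. */
	return pvrdma_cmd_post(dev, &req, NULL, 0);
}

Commands that do carry a reply pass a union pvrdma_cmd_resp plus the expected PVRDMA_CMD_*_RESP code, as pvrdma_create_cq() does with PVRDMA_CMD_CREATE_CQ_RESP.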
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
new file mode 100644
index 000000000000..e429ca5b16aa
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_req_notify_cq - request notification for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: notification flags
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct pvrdma_dev *dev = to_vdev(ibcq->device);
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ u32 val = cq->cq_handle;
+
+ val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+
+ pvrdma_write_uar_cq(dev, val);
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_cq - create completion queue
+ * @ibdev: the device
+ * @attr: completion queue attributes
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: ib_cq completion queue pointer on success,
+ * otherwise returns negative errno.
+ */
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int entries = attr->cqe;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ struct pvrdma_cq *cq;
+ int ret;
+ int npages;
+ unsigned long flags;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+ struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+ struct pvrdma_create_cq ucmd;
+
+ BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+
+ entries = roundup_pow_of_two(entries);
+ if (entries < 1 || entries > dev->dsr->caps.max_cqe)
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
+ return ERR_PTR(-ENOMEM);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ atomic_dec(&dev->num_cqs);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cq->ibcq.cqe = entries;
+
+ if (context) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_cq;
+ }
+
+ cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(cq->umem)) {
+ ret = PTR_ERR(cq->umem);
+ goto err_cq;
+ }
+
+ npages = ib_umem_page_count(cq->umem);
+ } else {
+ cq->is_kernel = true;
+
+ /* One extra page for shared ring state */
+ npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
+ PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /* Skip header page. */
+ cq->offset = PAGE_SIZE;
+ }
+
+ if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in completion queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ /* Ring state is always the first page. Set in library for user cq. */
+ if (cq->is_kernel)
+ cq->ring_state = cq->pdir.pages[0];
+ else
+ pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
+
+ atomic_set(&cq->refcnt, 1);
+ init_waitqueue_head(&cq->wait);
+ spin_lock_init(&cq->cq_lock);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
+ cmd->nchunks = npages;
+ cmd->ctx_handle = (context) ?
+ (u64)to_vucontext(context)->ctx_handle : 0;
+ cmd->cqe = entries;
+ cmd->pdir_dma = cq->pdir.dir_dma;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create completion queue, error: %d\n", ret);
+ goto err_page_dir;
+ }
+
+ cq->ibcq.cqe = resp->cqe;
+ cq->cq_handle = resp->cq_handle;
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (context) {
+ cq->uar = &(to_vucontext(context)->uar);
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ pvrdma_destroy_cq(&cq->ibcq);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return &cq->ibcq;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+err_umem:
+ if (context)
+ ib_umem_release(cq->umem);
+err_cq:
+ atomic_dec(&dev->num_cqs);
+ kfree(cq);
+
+ return ERR_PTR(ret);
+}
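A worked sizing example for the kernel-CQ branch above, assuming a 4 KiB PAGE_SIZE: with entries = 256 and the 64-byte CQE enforced by the BUILD_BUG_ON,

	npages = 1 + (256 * sizeof(struct pvrdma_cqe) + PAGE_SIZE - 1) / PAGE_SIZE
	       = 1 + (16384 + 4095) / 4096
	       = 1 + 4 = 5

Four pages hold the CQE ring itself and the extra leading page holds the shared ring state, which is why cq->offset is set to PAGE_SIZE so get_cqe() skips it.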
+
+static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+{
+ atomic_dec(&cq->refcnt);
+ wait_event(cq->wait, !atomic_read(&cq->refcnt));
+
+ if (!cq->is_kernel)
+ ib_umem_release(cq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &cq->pdir);
+ kfree(cq);
+}
+
+/**
+ * pvrdma_destroy_cq - destroy completion queue
+ * @cq: the completion queue to destroy.
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_cq(struct ib_cq *cq)
+{
+ struct pvrdma_cq *vcq = to_vcq(cq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
+ struct pvrdma_dev *dev = to_vdev(cq->device);
+ unsigned long flags;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
+ cmd->cq_handle = vcq->cq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not destroy completion queue, error: %d\n",
+ ret);
+
+ /* free cq's resources */
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ dev->cq_tbl[vcq->cq_handle] = NULL;
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ pvrdma_free_cq(dev, vcq);
+ atomic_dec(&dev->num_cqs);
+
+ return ret;
+}
+
+/**
+ * pvrdma_modify_cq - modify the CQ moderation parameters
+ * @cq: the CQ to modify
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ *
+ * @return: -EOPNOTSUPP as CQ moderation is not supported.
+ */
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+{
+ return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
+ &cq->pdir,
+ cq->offset +
+ sizeof(struct pvrdma_cqe) * i);
+}
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
+{
+ int head;
+ int has_data;
+
+ if (!cq->is_kernel)
+ return;
+
+ /* Lock held */
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data > 0)) {
+ int items;
+ int curr;
+ int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
+ cq->ibcq.cqe);
+ struct pvrdma_cqe *cqe;
+ struct pvrdma_cqe *curr_cqe;
+
+ items = (tail > head) ? (tail - head) :
+ (cq->ibcq.cqe - head + tail);
+ curr = --tail;
+ while (items-- > 0) {
+ if (curr < 0)
+ curr = cq->ibcq.cqe - 1;
+ if (tail < 0)
+ tail = cq->ibcq.cqe - 1;
+ curr_cqe = get_cqe(cq, curr);
+ if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
+ if (curr != tail) {
+ cqe = get_cqe(cq, tail);
+ *cqe = *curr_cqe;
+ }
+ tail--;
+ } else {
+ pvrdma_idx_ring_inc(
+ &cq->ring_state->rx.cons_head,
+ cq->ibcq.cqe);
+ }
+ curr--;
+ }
+ }
+}
+
+static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
+ int has_data;
+ unsigned int head;
+ bool tried = false;
+ struct pvrdma_cqe *cqe;
+
+retry:
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (has_data == 0) {
+ if (tried)
+ return -EAGAIN;
+
+ pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
+
+ tried = true;
+ goto retry;
+ } else if (has_data == PVRDMA_INVALID_IDX) {
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ return -EAGAIN;
+ }
+
+ cqe = get_cqe(cq, head);
+
+ /* Ensure cqe is valid. */
+ rmb();
+ if (dev->qp_tbl[cqe->qp & 0xffff])
+ *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
+ else
+ return -EAGAIN;
+
+ wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
+ wc->status = pvrdma_wc_status_to_ib(cqe->status);
+ wc->wr_id = cqe->wr_id;
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->byte_len = cqe->byte_len;
+ wc->ex.imm_data = cqe->imm_data;
+ wc->src_qp = cqe->src_qp;
+ wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
+ wc->pkey_index = cqe->pkey_index;
+ wc->slid = cqe->slid;
+ wc->sl = cqe->sl;
+ wc->dlid_path_bits = cqe->dlid_path_bits;
+ wc->port_num = cqe->port_num;
+ wc->vendor_err = 0;
+
+ /* Update shared ring state */
+ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+
+ return 0;
+}
+
+/**
+ * pvrdma_poll_cq - poll for work completion queue entries
+ * @ibcq: completion queue
+ * @num_entries: the maximum number of entries
+ * @entry: pointer to work completion array
+ *
+ * @return: number of polled completion entries
+ */
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct pvrdma_cq *cq = to_vcq(ibcq);
+ struct pvrdma_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ if (num_entries < 1 || wc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ /* Ensure we do not return errors from poll_cq */
+ return npolled;
+}
+
+/**
+ * pvrdma_resize_cq - resize CQ
+ * @ibcq: the completion queue
+ * @entries: CQ entries
+ * @udata: user data
+ *
+ * @return: -EOPNOTSUPP as CQ resize is not supported.
+ */
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ return -EOPNOTSUPP;
+}
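None of these entry points are called directly by consumers; the ib core dispatches to them through the device's verb table. A minimal sketch of the consumer-side pattern the poll and re-arm paths above serve (illustrative only, error handling omitted):

/* Sketch: drain a CQ and re-arm it, as a kernel ULP would. */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	/* ib_poll_cq() lands in pvrdma_poll_cq() for this device. */
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++)
			pr_debug("wr_id %llu status %d\n",
				 (unsigned long long)wc[i].wr_id,
				 wc[i].status);
	}

	/* Re-arm for the next completion: lands in pvrdma_req_notify_cq(). */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}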
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
new file mode 100644
index 000000000000..c06768635d65
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_DEV_API_H__
+#define __PVRDMA_DEV_API_H__
+
+#include <linux/types.h>
+
+#include "pvrdma_verbs.h"
+
+#define PVRDMA_VERSION 17
+#define PVRDMA_BOARD_ID 1
+#define PVRDMA_REV_ID 1
+
+/*
+ * Masks and accessors for page directory, which is a two-level lookup:
+ * page directory -> page table -> page. Only one directory for now, but we
+ * could expand that easily. 9 bits for tables and 9 bits for pages give one
+ * gigabyte for memory regions and so forth.
+ */
+
+#define PVRDMA_PDIR_SHIFT 18
+#define PVRDMA_PTABLE_SHIFT 9
+#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
+#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
+#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
+#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
+#define PVRDMA_MAX_FAST_REG_PAGES 128
+
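A worked example of the two-level split defined above, assuming the usual 4 KiB PAGE_SIZE:

	page index 0x2abcd:
	  PVRDMA_PAGE_DIR_DIR(0x2abcd)   = (0x2abcd >> 18) & 0x1   = 0
	  PVRDMA_PAGE_DIR_TABLE(0x2abcd) = (0x2abcd >> 9)  & 0x1ff = 0x155
	  PVRDMA_PAGE_DIR_PAGE(0x2abcd)  = 0x2abcd & 0x1ff         = 0x1cd

	PVRDMA_PAGE_DIR_MAX_PAGES = 1 * 512 * 512 = 262144 pages, i.e. 1 GiB
	of 4 KiB pages per page directory.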
+/*
+ * Max MSI-X vectors.
+ */
+
+#define PVRDMA_MAX_INTERRUPTS 3
+
+/* Register offsets within PCI resource on BAR1. */
+#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
+#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
+#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
+#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
+#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
+#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
+#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
+#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
+#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
+#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */
+
+/* Object flags. */
+#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
+#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
+#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
+#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */
+
+/*
+ * Atomic operation capability (masked versions are extended atomic
+ * operations).
+ */
+
+#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
+#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
+#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
+#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */
+
+/*
+ * Base Memory Management Extension flags to support Fast Reg Memory Regions
+ * and Fast Reg Work Requests. Each flag represents a verb operation and we
+ * must support all of them to qualify for the BMME device cap.
+ */
+
+#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
+#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
+#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */
+
+/*
+ * GID types. The interpretation of the gid_types bit field in the device
+ * capabilities will depend on the device mode. For now, the device only
+ * supports RoCE as mode, so only the different GID types for RoCE are
+ * defined.
+ */
+
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
+
+enum pvrdma_pci_resource {
+ PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
+ PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
+ PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
+ PVRDMA_PCI_RESOURCE_LAST, /* Last. */
+};
+
+enum pvrdma_device_ctl {
+ PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
+ PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */
+ PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
+};
+
+enum pvrdma_intr_vector {
+ PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
+ PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
+ PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
+ /* Additional CQ notification vectors. */
+};
+
+enum pvrdma_intr_cause {
+ PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
+ PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
+ PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
+};
+
+enum pvrdma_intr_type {
+ PVRDMA_INTR_TYPE_INTX, /* Legacy. */
+ PVRDMA_INTR_TYPE_MSI, /* MSI. */
+ PVRDMA_INTR_TYPE_MSIX, /* MSI-X. */
+};
+
+enum pvrdma_gos_bits {
+ PVRDMA_GOS_BITS_UNK, /* Unknown. */
+ PVRDMA_GOS_BITS_32, /* 32-bit. */
+ PVRDMA_GOS_BITS_64, /* 64-bit. */
+};
+
+enum pvrdma_gos_type {
+ PVRDMA_GOS_TYPE_UNK, /* Unknown. */
+ PVRDMA_GOS_TYPE_LINUX, /* Linux. */
+};
+
+enum pvrdma_device_mode {
+ PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
+ PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
+ PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
+};
+
+struct pvrdma_gos_info {
+ u32 gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
+ u32 gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
+ u32 gos_ver:16; /* W: Guest OS version. */
+ u32 gos_misc:10; /* W: Other. */
+ u32 pad; /* Pad to 8-byte alignment. */
+};
+
+struct pvrdma_device_caps {
+ u64 fw_ver; /* R: Query device. */
+ __be64 node_guid;
+ __be64 sys_image_guid;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u64 atomic_arg_sizes; /* EX verbs. */
+ u32 ex_comp_mask; /* EX verbs. */
+ u32 device_cap_flags2; /* EX verbs. */
+ u32 max_fa_bit_boundary; /* EX verbs. */
+ u32 log_max_atomic_inline_arg; /* EX verbs. */
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u32 max_qp;
+ u32 max_qp_wr;
+ u32 device_cap_flags;
+ u32 max_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_qp_rd_atom;
+ u32 max_ee_rd_atom;
+ u32 max_res_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_ee_init_rd_atom;
+ u32 max_ee;
+ u32 max_rdd;
+ u32 max_mw;
+ u32 max_raw_ipv6_qp;
+ u32 max_raw_ethy_qp;
+ u32 max_mcast_grp;
+ u32 max_mcast_qp_attach;
+ u32 max_total_mcast_qp_attach;
+ u32 max_ah;
+ u32 max_fmr;
+ u32 max_map_per_fmr;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u32 max_srq_sge;
+ u32 max_uar;
+ u32 gid_tbl_len;
+ u16 max_pkeys;
+ u8 local_ca_ack_delay;
+ u8 phys_port_cnt;
+ u8 mode; /* PVRDMA_DEVICE_MODE_ */
+ u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
+ u8 bmme_flags; /* FRWR Mem Mgmt Extensions */
+ u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
+ u8 reserved[4];
+};
+
+struct pvrdma_ring_page_info {
+ u32 num_pages; /* Num pages incl. header. */
+ u32 reserved; /* Reserved. */
+ u64 pdir_dma; /* Page directory PA. */
+};
+
+#pragma pack(push, 1)
+
+struct pvrdma_device_shared_region {
+ u32 driver_version; /* W: Driver version. */
+ u32 pad; /* Pad to 8-byte align. */
+ struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
+ u64 cmd_slot_dma; /* W: Command slot address. */
+ u64 resp_slot_dma; /* W: Response slot address. */
+ struct pvrdma_ring_page_info async_ring_pages;
+ /* W: Async ring page info. */
+ struct pvrdma_ring_page_info cq_ring_pages;
+ /* W: CQ ring page info. */
+ u32 uar_pfn; /* W: UAR pageframe. */
+ u32 pad2; /* Pad to 8-byte align. */
+ struct pvrdma_device_caps caps; /* R: Device capabilities. */
+};
+
+#pragma pack(pop)
+
+/* Event types. Currently a 1:1 mapping with enum ib_event. */
+enum pvrdma_eqe_type {
+ PVRDMA_EVENT_CQ_ERR,
+ PVRDMA_EVENT_QP_FATAL,
+ PVRDMA_EVENT_QP_REQ_ERR,
+ PVRDMA_EVENT_QP_ACCESS_ERR,
+ PVRDMA_EVENT_COMM_EST,
+ PVRDMA_EVENT_SQ_DRAINED,
+ PVRDMA_EVENT_PATH_MIG,
+ PVRDMA_EVENT_PATH_MIG_ERR,
+ PVRDMA_EVENT_DEVICE_FATAL,
+ PVRDMA_EVENT_PORT_ACTIVE,
+ PVRDMA_EVENT_PORT_ERR,
+ PVRDMA_EVENT_LID_CHANGE,
+ PVRDMA_EVENT_PKEY_CHANGE,
+ PVRDMA_EVENT_SM_CHANGE,
+ PVRDMA_EVENT_SRQ_ERR,
+ PVRDMA_EVENT_SRQ_LIMIT_REACHED,
+ PVRDMA_EVENT_QP_LAST_WQE_REACHED,
+ PVRDMA_EVENT_CLIENT_REREGISTER,
+ PVRDMA_EVENT_GID_CHANGE,
+};
+
+/* Event queue element. */
+struct pvrdma_eqe {
+ u32 type; /* Event type. */
+ u32 info; /* Handle, other. */
+};
+
+/* CQ notification queue element. */
+struct pvrdma_cqne {
+ u32 info; /* Handle */
+};
+
+enum {
+ PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
+ PVRDMA_CMD_QUERY_PKEY,
+ PVRDMA_CMD_CREATE_PD,
+ PVRDMA_CMD_DESTROY_PD,
+ PVRDMA_CMD_CREATE_MR,
+ PVRDMA_CMD_DESTROY_MR,
+ PVRDMA_CMD_CREATE_CQ,
+ PVRDMA_CMD_RESIZE_CQ,
+ PVRDMA_CMD_DESTROY_CQ,
+ PVRDMA_CMD_CREATE_QP,
+ PVRDMA_CMD_MODIFY_QP,
+ PVRDMA_CMD_QUERY_QP,
+ PVRDMA_CMD_DESTROY_QP,
+ PVRDMA_CMD_CREATE_UC,
+ PVRDMA_CMD_DESTROY_UC,
+ PVRDMA_CMD_CREATE_BIND,
+ PVRDMA_CMD_DESTROY_BIND,
+ PVRDMA_CMD_MAX,
+};
+
+enum {
+ PVRDMA_CMD_FIRST_RESP = (1 << 31),
+ PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
+ PVRDMA_CMD_QUERY_PKEY_RESP,
+ PVRDMA_CMD_CREATE_PD_RESP,
+ PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
+ PVRDMA_CMD_CREATE_MR_RESP,
+ PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
+ PVRDMA_CMD_CREATE_CQ_RESP,
+ PVRDMA_CMD_RESIZE_CQ_RESP,
+ PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
+ PVRDMA_CMD_CREATE_QP_RESP,
+ PVRDMA_CMD_MODIFY_QP_RESP,
+ PVRDMA_CMD_QUERY_QP_RESP,
+ PVRDMA_CMD_DESTROY_QP_RESP,
+ PVRDMA_CMD_CREATE_UC_RESP,
+ PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
+ PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
+ PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+ PVRDMA_CMD_MAX_RESP,
+};
+
+struct pvrdma_cmd_hdr {
+ u64 response; /* Key for response lookup. */
+ u32 cmd; /* PVRDMA_CMD_ */
+ u32 reserved; /* Reserved. */
+};
+
+struct pvrdma_cmd_resp_hdr {
+ u64 response; /* From cmd hdr. */
+ u32 ack; /* PVRDMA_CMD_XXX_RESP */
+ u8 err; /* Error. */
+ u8 reserved[3]; /* Reserved. */
+};
+
+struct pvrdma_cmd_query_port {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_query_port_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_port_attr attrs;
+};
+
+struct pvrdma_cmd_query_pkey {
+ struct pvrdma_cmd_hdr hdr;
+ u8 port_num;
+ u8 index;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_query_pkey_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u16 pkey;
+ u8 reserved[6];
+};
+
+struct pvrdma_cmd_create_uc {
+ struct pvrdma_cmd_hdr hdr;
+ u32 pfn; /* UAR page frame number */
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_uc_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_uc {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 ctx_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_pd {
+ struct pvrdma_cmd_hdr hdr;
+ u32 pd_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u64 start;
+ u64 length;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 access_flags;
+ u32 flags;
+ u32 nchunks;
+};
+
+struct pvrdma_cmd_create_mr_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 mr_handle;
+ u32 lkey;
+ u32 rkey;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_mr {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mr_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 ctx_handle;
+ u32 cqe;
+ u32 nchunks;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 cqe;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_cq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 cq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 send_cq_handle;
+ u32 recv_cq_handle;
+ u32 srq_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 lkey;
+ u32 access_flags;
+ u16 total_chunks;
+ u16 send_chunks;
+ u16 max_atomic_arg;
+ u8 sq_sig_all;
+ u8 qp_type;
+ u8 is_srq;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_create_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
+struct pvrdma_cmd_modify_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_query_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u32 attr_mask;
+};
+
+struct pvrdma_cmd_query_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_qp {
+ struct pvrdma_cmd_hdr hdr;
+ u32 qp_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_qp_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 events_reported;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 mtu;
+ u32 vlan;
+ u32 index;
+ u8 new_gid[16];
+ u8 gid_type;
+ u8 reserved[3];
+};
+
+struct pvrdma_cmd_destroy_bind {
+ struct pvrdma_cmd_hdr hdr;
+ u32 index;
+ u8 dest_gid[16];
+ u8 reserved[4];
+};
+
+union pvrdma_cmd_req {
+ struct pvrdma_cmd_hdr hdr;
+ struct pvrdma_cmd_query_port query_port;
+ struct pvrdma_cmd_query_pkey query_pkey;
+ struct pvrdma_cmd_create_uc create_uc;
+ struct pvrdma_cmd_destroy_uc destroy_uc;
+ struct pvrdma_cmd_create_pd create_pd;
+ struct pvrdma_cmd_destroy_pd destroy_pd;
+ struct pvrdma_cmd_create_mr create_mr;
+ struct pvrdma_cmd_destroy_mr destroy_mr;
+ struct pvrdma_cmd_create_cq create_cq;
+ struct pvrdma_cmd_resize_cq resize_cq;
+ struct pvrdma_cmd_destroy_cq destroy_cq;
+ struct pvrdma_cmd_create_qp create_qp;
+ struct pvrdma_cmd_modify_qp modify_qp;
+ struct pvrdma_cmd_query_qp query_qp;
+ struct pvrdma_cmd_destroy_qp destroy_qp;
+ struct pvrdma_cmd_create_bind create_bind;
+ struct pvrdma_cmd_destroy_bind destroy_bind;
+};
+
+union pvrdma_cmd_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_cmd_query_port_resp query_port_resp;
+ struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
+ struct pvrdma_cmd_create_uc_resp create_uc_resp;
+ struct pvrdma_cmd_create_pd_resp create_pd_resp;
+ struct pvrdma_cmd_create_mr_resp create_mr_resp;
+ struct pvrdma_cmd_create_cq_resp create_cq_resp;
+ struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
+ struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_query_qp_resp query_qp_resp;
+ struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+};
+
+#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
new file mode 100644
index 000000000000..bf51357ea3aa
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev)
+{
+ u32 num = dev->dsr->caps.max_uar;
+ u32 mask = num - 1;
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ if (!is_power_of_2(num))
+ return -EINVAL;
+
+ tbl->last = 0;
+ tbl->top = 0;
+ tbl->max = num;
+ tbl->mask = mask;
+ spin_lock_init(&tbl->lock);
+ tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ if (!tbl->table)
+ return -ENOMEM;
+
+ /* 0th UAR is taken by the device. */
+ set_bit(0, tbl->table);
+
+ return 0;
+}
+
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+ kfree(tbl->table);
+}
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl;
+ unsigned long flags;
+ u32 obj;
+
+ tbl = &dev->uar_table.tbl;
+
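+ /*
+ * Find a free UAR index. The search starts from the last position and
+ * wraps around to the beginning of the bitmap if the end is reached.
+ */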
+ spin_lock_irqsave(&tbl->lock, flags);
+ obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
+ if (obj >= tbl->max) {
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ obj = find_first_zero_bit(tbl->table, tbl->max);
+ }
+
+ if (obj >= tbl->max) {
+ spin_unlock_irqrestore(&tbl->lock, flags);
+ return -ENOMEM;
+ }
+
+ set_bit(obj, tbl->table);
+ obj |= tbl->top;
+
+ spin_unlock_irqrestore(&tbl->lock, flags);
+
+ uar->index = obj;
+ uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+ struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+ unsigned long flags;
+ u32 obj;
+
+ obj = uar->index & (tbl->max - 1);
+ spin_lock_irqsave(&tbl->lock, flags);
+ clear_bit(obj, tbl->table);
+ tbl->last = min(tbl->last, obj);
+ tbl->top = (tbl->top + tbl->max) & tbl->mask;
+ spin_unlock_irqrestore(&tbl->lock, flags);
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
new file mode 100644
index 000000000000..231a1ce1f4be
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <net/addrconf.h>
+
+#include "pvrdma.h"
+
+#define DRV_NAME "vmw_pvrdma"
+#define DRV_VERSION "1.0.0.0-k"
+
+static DEFINE_MUTEX(pvrdma_device_list_lock);
+static LIST_HEAD(pvrdma_device_list);
+static struct workqueue_struct *event_wq;
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context);
+static int pvrdma_del_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ void **context);
+
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
+}
+
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", PVRDMA_REV_ID);
+}
+
+static ssize_t show_board(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct device_attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type,
+ &dev_attr_board_id
+};
+
+static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct pvrdma_dev *dev =
+ container_of(device, struct pvrdma_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d\n",
+ (int) (dev->dsr->caps.fw_ver >> 32),
+ (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dsr->caps.fw_ver & 0xffff);
+}
+
+static int pvrdma_init_device(struct pvrdma_dev *dev)
+{
+ /* Initialize device-related state. */
+ spin_lock_init(&dev->cmd_lock);
+ sema_init(&dev->cmd_sema, 1);
+ atomic_set(&dev->num_qps, 0);
+ atomic_set(&dev->num_cqs, 0);
+ atomic_set(&dev->num_pds, 0);
+ atomic_set(&dev->num_ahs, 0);
+
+ return 0;
+}
+
+static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = pvrdma_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ return 0;
+}
+
+static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct net_device *netdev;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (port_num != 1)
+ return NULL;
+
+ rcu_read_lock();
+ netdev = dev->netdev;
+ if (netdev)
+ dev_hold(netdev);
+ rcu_read_unlock();
+
+ return netdev;
+}
+
+static int pvrdma_register_device(struct pvrdma_dev *dev)
+{
+ int ret = -ENOMEM;
+ int i = 0;
+
+ strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
+ dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
+ dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ dev->flags = 0;
+ dev->ib_dev.owner = THIS_MODULE;
+ dev->ib_dev.num_comp_vectors = 1;
+ dev->ib_dev.dma_device = &dev->pdev->dev;
+ dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
+ dev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
+
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
+
+ dev->ib_dev.query_device = pvrdma_query_device;
+ dev->ib_dev.query_port = pvrdma_query_port;
+ dev->ib_dev.query_gid = pvrdma_query_gid;
+ dev->ib_dev.query_pkey = pvrdma_query_pkey;
+ dev->ib_dev.modify_port = pvrdma_modify_port;
+ dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
+ dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
+ dev->ib_dev.mmap = pvrdma_mmap;
+ dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
+ dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
+ dev->ib_dev.create_ah = pvrdma_create_ah;
+ dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
+ dev->ib_dev.create_qp = pvrdma_create_qp;
+ dev->ib_dev.modify_qp = pvrdma_modify_qp;
+ dev->ib_dev.query_qp = pvrdma_query_qp;
+ dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
+ dev->ib_dev.post_send = pvrdma_post_send;
+ dev->ib_dev.post_recv = pvrdma_post_recv;
+ dev->ib_dev.create_cq = pvrdma_create_cq;
+ dev->ib_dev.modify_cq = pvrdma_modify_cq;
+ dev->ib_dev.resize_cq = pvrdma_resize_cq;
+ dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
+ dev->ib_dev.poll_cq = pvrdma_poll_cq;
+ dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
+ dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
+ dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
+ dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
+ dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
+ dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
+ dev->ib_dev.add_gid = pvrdma_add_gid;
+ dev->ib_dev.del_gid = pvrdma_del_gid;
+ dev->ib_dev.get_netdev = pvrdma_get_netdev;
+ dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
+ dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
+ dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;
+
+ mutex_init(&dev->port_mutex);
+ spin_lock_init(&dev->desc_lock);
+
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->cq_tbl)
+ return ret;
+ spin_lock_init(&dev->cq_tbl_lock);
+
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->qp_tbl)
+ goto err_cq_free;
+ spin_lock_init(&dev->qp_tbl_lock);
+
+ ret = ib_register_device(&dev->ib_dev, NULL);
+ if (ret)
+ goto err_qp_free;
+
+ for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
+ ret = device_create_file(&dev->ib_dev.dev,
+ pvrdma_class_attributes[i]);
+ if (ret)
+ goto err_class;
+ }
+
+ dev->ib_active = true;
+
+ return 0;
+
+err_class:
+ ib_unregister_device(&dev->ib_dev);
+err_qp_free:
+ kfree(dev->qp_tbl);
+err_cq_free:
+ kfree(dev->cq_tbl);
+
+ return ret;
+}
+
+static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
+{
+ u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
+ struct pvrdma_dev *dev = dev_id;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");
+
+ if (dev->intr.type != PVRDMA_INTR_TYPE_MSIX) {
+ /* Legacy intr */
+ icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
+ if (icr == 0)
+ return IRQ_NONE;
+ }
+
+ if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
+ complete(&dev->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
+{
+ struct pvrdma_qp *qp;
+ unsigned long flags;
+
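+ /*
+ * Take a reference on the QP under the table lock so it cannot be
+ * freed while the event is dispatched; dropping the last reference
+ * wakes up any waiter on qp->wait.
+ */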
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
+ if (qp)
+ atomic_inc(&qp->refcnt);
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ if (qp && qp->ibqp.event_handler) {
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct ib_event e;
+
+ e.device = ibqp->device;
+ e.element.qp = ibqp;
+ e.event = type; /* 1:1 mapping for now. */
+ ibqp->event_handler(&e, ibqp->qp_context);
+ }
+ if (qp) {
+ atomic_dec(&qp->refcnt);
+ if (atomic_read(&qp->refcnt) == 0)
+ wake_up(&qp->wait);
+ }
+}
+
+static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
+{
+ struct pvrdma_cq *cq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
+ if (cq)
+ atomic_inc(&cq->refcnt);
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (cq && cq->ibcq.event_handler) {
+ struct ib_cq *ibcq = &cq->ibcq;
+ struct ib_event e;
+
+ e.device = ibcq->device;
+ e.element.cq = ibcq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibcq->event_handler(&e, ibcq->cq_context);
+ }
+ if (cq) {
+ atomic_dec(&cq->refcnt);
+ if (atomic_read(&cq->refcnt) == 0)
+ wake_up(&cq->wait);
+ }
+}
+
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+ enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ ib_event.device = &dev->ib_dev;
+ ib_event.element.port_num = port;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
+{
+ if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
+ dev_warn(&dev->pdev->dev, "event on invalid port %d\n", port);
+ return;
+ }
+
+ pvrdma_dispatch_event(dev, port, type);
+}
+
+static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
+{
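+ /*
+ * The first page of the async-event page directory holds the ring
+ * state; the event queue elements start on the following page, hence
+ * the PAGE_SIZE offset.
+ */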
+ return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
+ &dev->async_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_eqe) * i);
+}
+
+static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->async_ring_state->rx;
+ int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
+ PAGE_SIZE / sizeof(struct pvrdma_eqe);
+ unsigned int head;
+
+ dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");
+
+ /*
+ * Don't process events until the IB device is registered. Otherwise
+ * we'll try to ib_dispatch_event() on an invalid device.
+ */
+ if (!dev->ib_active)
+ return IRQ_HANDLED;
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_eqe *eqe;
+
+ eqe = get_eqe(dev, head);
+
+ switch (eqe->type) {
+ case PVRDMA_EVENT_QP_FATAL:
+ case PVRDMA_EVENT_QP_REQ_ERR:
+ case PVRDMA_EVENT_QP_ACCESS_ERR:
+ case PVRDMA_EVENT_COMM_EST:
+ case PVRDMA_EVENT_SQ_DRAINED:
+ case PVRDMA_EVENT_PATH_MIG:
+ case PVRDMA_EVENT_PATH_MIG_ERR:
+ case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
+ pvrdma_qp_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_CQ_ERR:
+ pvrdma_cq_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_SRQ_ERR:
+ case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+ break;
+
+ case PVRDMA_EVENT_PORT_ACTIVE:
+ case PVRDMA_EVENT_PORT_ERR:
+ case PVRDMA_EVENT_LID_CHANGE:
+ case PVRDMA_EVENT_PKEY_CHANGE:
+ case PVRDMA_EVENT_SM_CHANGE:
+ case PVRDMA_EVENT_CLIENT_REREGISTER:
+ case PVRDMA_EVENT_GID_CHANGE:
+ pvrdma_dev_event(dev, eqe->info, eqe->type);
+ break;
+
+ case PVRDMA_EVENT_DEVICE_FATAL:
+ pvrdma_dev_event(dev, 1, eqe->type);
+ break;
+
+ default:
+ break;
+ }
+
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
+ unsigned int i)
+{
+ return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
+ &dev->cq_pdir,
+ PAGE_SIZE +
+ sizeof(struct pvrdma_cqne) * i);
+}
+
+static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
+{
+ struct pvrdma_dev *dev = dev_id;
+ struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
+ int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
+ sizeof(struct pvrdma_cqne);
+ unsigned int head;
+ unsigned long flags;
+
+ dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
+
+ while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+ struct pvrdma_cqne *cqne;
+ struct pvrdma_cq *cq;
+
+ cqne = get_cqne(dev, head);
+ spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
+ if (cq)
+ atomic_inc(&cq->refcnt);
+ spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+ if (cq && cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ if (cq) {
+ atomic_dec(&cq->refcnt);
+ if (atomic_read(&cq->refcnt) == 0)
+ wake_up(&cq->wait);
+ }
+ pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pvrdma_disable_msi_all(struct pvrdma_dev *dev)
+{
+ if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX)
+ pci_disable_msix(dev->pdev);
+ else if (dev->intr.type == PVRDMA_INTR_TYPE_MSI)
+ pci_disable_msi(dev->pdev);
+}
+
+static void pvrdma_free_irq(struct pvrdma_dev *dev)
+{
+ int i;
+
+ dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
+
+ if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) {
+ for (i = 0; i < dev->intr.size; i++) {
+ if (dev->intr.enabled[i]) {
+ free_irq(dev->intr.msix_entry[i].vector, dev);
+ dev->intr.enabled[i] = 0;
+ }
+ }
+ } else if (dev->intr.type == PVRDMA_INTR_TYPE_INTX ||
+ dev->intr.type == PVRDMA_INTR_TYPE_MSI) {
+ free_irq(dev->pdev->irq, dev);
+ }
+}
+
+static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "enable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
+}
+
+static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
+{
+ dev_dbg(&dev->pdev->dev, "disable interrupts\n");
+ pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
+}
+
+static int pvrdma_enable_msix(struct pci_dev *pdev, struct pvrdma_dev *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < PVRDMA_MAX_INTERRUPTS; i++) {
+ dev->intr.msix_entry[i].entry = i;
+ dev->intr.msix_entry[i].vector = i;
+
+ switch (i) {
+ case 0:
+ /* CMD ring handler */
+ dev->intr.handler[i] = pvrdma_intr0_handler;
+ break;
+ case 1:
+ /* Async event ring handler */
+ dev->intr.handler[i] = pvrdma_intr1_handler;
+ break;
+ default:
+ /* Completion queue handler */
+ dev->intr.handler[i] = pvrdma_intrx_handler;
+ break;
+ }
+ }
+
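+ /*
+ * Try to enable the full set of vectors first. A positive return value
+ * from pci_enable_msix() is the number of vectors that could actually
+ * be allocated, so retry with that smaller count.
+ */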
+ ret = pci_enable_msix(pdev, dev->intr.msix_entry,
+ PVRDMA_MAX_INTERRUPTS);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+ dev->intr.size = PVRDMA_MAX_INTERRUPTS;
+ } else if (ret > 0) {
+ ret = pci_enable_msix(pdev, dev->intr.msix_entry, ret);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+ dev->intr.size = ret;
+ } else {
+ dev->intr.size = 0;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "using interrupt type %d, size %d\n",
+ dev->intr.type, dev->intr.size);
+
+ return ret;
+}
+
+static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
+{
+ int ret = 0;
+ int i;
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX) &&
+ pvrdma_enable_msix(dev->pdev, dev)) {
+ /* Try MSI */
+ ret = pci_enable_msi(dev->pdev);
+ if (!ret) {
+ dev->intr.type = PVRDMA_INTR_TYPE_MSI;
+ } else {
+ /* Legacy INTR */
+ dev->intr.type = PVRDMA_INTR_TYPE_INTX;
+ }
+ }
+
+ /* Request the first IRQ. */
+ switch (dev->intr.type) {
+ case PVRDMA_INTR_TYPE_INTX:
+ case PVRDMA_INTR_TYPE_MSI:
+ ret = request_irq(dev->pdev->irq, pvrdma_intr0_handler,
+ IRQF_SHARED, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt\n");
+ goto disable_msi;
+ }
+ break;
+ case PVRDMA_INTR_TYPE_MSIX:
+ ret = request_irq(dev->intr.msix_entry[0].vector,
+ pvrdma_intr0_handler, 0, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt 0\n");
+ goto disable_msi;
+ }
+ dev->intr.enabled[0] = 1;
+ break;
+ default:
+ /* Not reached */
+ break;
+ }
+
+ /* For MSIX: request intr for each vector */
+ if (dev->intr.size > 1) {
+ ret = request_irq(dev->intr.msix_entry[1].vector,
+ pvrdma_intr1_handler, 0, DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt 1\n");
+ goto free_irq;
+ }
+ dev->intr.enabled[1] = 1;
+
+ for (i = 2; i < dev->intr.size; i++) {
+ ret = request_irq(dev->intr.msix_entry[i].vector,
+ pvrdma_intrx_handler, 0,
+ DRV_NAME, dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to request interrupt %d\n", i);
+ goto free_irq;
+ }
+ dev->intr.enabled[i] = 1;
+ }
+ }
+
+ return 0;
+
+free_irq:
+ pvrdma_free_irq(dev);
+disable_msi:
+ pvrdma_disable_msi_all(dev);
+ return ret;
+}
+
+static void pvrdma_free_slots(struct pvrdma_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (dev->resp_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
+ dev->dsr->resp_slot_dma);
+ if (dev->cmd_slot)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
+ dev->dsr->cmd_slot_dma);
+}
+
+static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
+ const union ib_gid *gid,
+ int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;
+
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_bind, 0, sizeof(*cmd_bind));
+ cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
+ memcpy(cmd_bind->new_gid, gid->raw, 16);
+ cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
+ cmd_bind->vlan = 0xfff;
+ cmd_bind->index = index;
+ cmd_bind->gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create binding, error: %d\n", ret);
+ return -EFAULT;
+ }
+ memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
+ return 0;
+}
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ const union ib_gid *gid,
+ const struct ib_gid_attr *attr,
+ void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ return pvrdma_add_gid_at_index(dev, gid, index);
+}
+
+static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
+{
+ int ret;
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;
+
+ /* Update sgid table. */
+ if (!dev->sgid_tbl) {
+ dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+ return -EINVAL;
+ }
+
+ memset(cmd_dest, 0, sizeof(*cmd_dest));
+ cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
+ memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
+ cmd_dest->index = index;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not destroy binding, error: %d\n", ret);
+ return ret;
+ }
+ memset(&dev->sgid_tbl[index], 0, 16);
+ return 0;
+}
+
+static int pvrdma_del_gid(struct ib_device *ibdev,
+ u8 port_num,
+ unsigned int index,
+ void **context)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
+ index, dev->netdev->name);
+
+ return pvrdma_del_gid_at_index(dev, index);
+}
+
+static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_REBOOT:
+ case NETDEV_DOWN:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_UP:
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ default:
+ dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
+ event, dev->ib_dev.name);
+ break;
+ }
+}
+
+static void pvrdma_netdevice_event_work(struct work_struct *work)
+{
+ struct pvrdma_netdevice_work *netdev_work;
+ struct pvrdma_dev *dev;
+
+ netdev_work = container_of(work, struct pvrdma_netdevice_work, work);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_for_each_entry(dev, &pvrdma_device_list, device_link) {
+ if (dev->netdev == netdev_work->event_netdev) {
+ pvrdma_netdevice_event_handle(dev, netdev_work->event);
+ break;
+ }
+ }
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ kfree(netdev_work);
+}
+
+static int pvrdma_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
+ struct pvrdma_netdevice_work *netdev_work;
+
+ netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
+ if (!netdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
+ netdev_work->event_netdev = event_netdev;
+ netdev_work->event = event;
+ queue_work(event_wq, &netdev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static int pvrdma_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pci_dev *pdev_net;
+ struct pvrdma_dev *dev;
+ int ret;
+ unsigned long start;
+ unsigned long len;
+ unsigned int version;
+ dma_addr_t slot_dma = 0;
+
+ dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
+
+ /* Allocate a zeroed IB device. */
+ dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ dev_err(&pdev->dev, "failed to allocate IB device\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_add(&dev->device_link, &pvrdma_device_list);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ ret = pvrdma_init_device(dev);
+ if (ret)
+ goto err_free_device;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_free_device;
+ }
+
+ dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
+ pci_resource_flags(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 0));
+ dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
+ pci_resource_flags(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+ (unsigned long long)pci_resource_len(pdev, 1));
+ dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+ (unsigned long long)pci_resource_start(pdev, 1));
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
+ ret = -ENOMEM;
+ goto err_disable_pdev;
+ }
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ /* Enable 64-bit DMA */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
+ goto err_free_resource;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
+ goto err_free_resource;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* Map register space */
+ start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+ dev->regs = ioremap(start, len);
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "register mapping failed\n");
+ ret = -ENOMEM;
+ goto err_free_resource;
+ }
+
+ /* Set up the per-device UAR. */
+ dev->driver_uar.index = 0;
+ dev->driver_uar.pfn =
+ pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+ PAGE_SHIFT;
+ dev->driver_uar.map =
+ ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!dev->driver_uar.map) {
+ dev_err(&pdev->dev, "failed to remap UAR pages\n");
+ ret = -ENOMEM;
+ goto err_unmap_regs;
+ }
+
+ version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
+ dev_info(&pdev->dev, "device version %d, driver version %d\n",
+ version, PVRDMA_VERSION);
+ if (version < PVRDMA_VERSION) {
+ dev_err(&pdev->dev, "incompatible device version\n");
+ ret = -EFAULT;
+ goto err_uar_unmap;
+ }
+
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
+ if (!dev->dsr) {
+ dev_err(&pdev->dev, "failed to allocate shared region\n");
+ ret = -ENOMEM;
+ goto err_uar_unmap;
+ }
+
+ /* Set up the shared region. */
+ memset(dev->dsr, 0, sizeof(*dev->dsr));
+ dev->dsr->driver_version = PVRDMA_VERSION;
+ dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
+ PVRDMA_GOS_BITS_32 :
+ PVRDMA_GOS_BITS_64;
+ dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
+ dev->dsr->gos_info.gos_ver = 1;
+ dev->dsr->uar_pfn = dev->driver_uar.pfn;
+
+ /* Command slot. */
+ dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->cmd_slot) {
+ ret = -ENOMEM;
+ goto err_free_dsr;
+ }
+
+ dev->dsr->cmd_slot_dma = (u64)slot_dma;
+
+ /* Response slot. */
+ dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &slot_dma, GFP_KERNEL);
+ if (!dev->resp_slot) {
+ ret = -ENOMEM;
+ goto err_free_slots;
+ }
+
+ dev->dsr->resp_slot_dma = (u64)slot_dma;
+
+ /* Async event ring */
+ dev->dsr->async_ring_pages.num_pages = 4;
+ ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
+ dev->dsr->async_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_slots;
+ dev->async_ring_state = dev->async_pdir.pages[0];
+ dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
+
+ /* CQ notification ring */
+ dev->dsr->cq_ring_pages.num_pages = 4;
+ ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
+ dev->dsr->cq_ring_pages.num_pages, true);
+ if (ret)
+ goto err_free_async_ring;
+ dev->cq_ring_state = dev->cq_pdir.pages[0];
+ dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;
+
+ /*
+ * Write the PA of the shared region to the device. The writes must be
+ * ordered such that the high bits are written last. When the writes
+ * complete, the device will have filled out the capabilities.
+ */
+
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
+ pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
+ (u32)((u64)(dev->dsrbase) >> 32));
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* Currently, the driver only supports RoCE mode. */
+ if (dev->dsr->caps.mode != PVRDMA_DEVICE_MODE_ROCE) {
+ dev_err(&pdev->dev, "unsupported transport %d\n",
+ dev->dsr->caps.mode);
+ ret = -EFAULT;
+ goto err_free_cq_ring;
+ }
+
+ /* Currently, the driver only supports RoCE V1. */
+ if (!(dev->dsr->caps.gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1)) {
+ dev_err(&pdev->dev, "driver needs RoCE v1 support\n");
+ ret = -EFAULT;
+ goto err_free_cq_ring;
+ }
+
+ /* The paired vmxnet3 device has the same bus and slot, but function 0. */
+ pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+ if (!pdev_net) {
+ dev_err(&pdev->dev, "failed to find paired net device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
+ pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
+ dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
+ pci_dev_put(pdev_net);
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ dev->netdev = pci_get_drvdata(pdev_net);
+ pci_dev_put(pdev_net);
+ if (!dev->netdev) {
+ dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
+ ret = -ENODEV;
+ goto err_free_cq_ring;
+ }
+
+ dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
+
+ /* Interrupt setup */
+ ret = pvrdma_alloc_intrs(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate interrupts\n");
+ ret = -ENOMEM;
+ goto err_netdevice;
+ }
+
+ /* Allocate UAR table. */
+ ret = pvrdma_uar_table_init(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to allocate UAR table\n");
+ ret = -ENOMEM;
+ goto err_free_intrs;
+ }
+
+ /* Allocate GID table */
+ dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
+ sizeof(union ib_gid), GFP_KERNEL);
+ if (!dev->sgid_tbl) {
+ ret = -ENOMEM;
+ goto err_free_uar_table;
+ }
+ dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);
+
+ pvrdma_enable_intrs(dev);
+
+ /* Activate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);
+
+ /* Make sure the write is complete before reading status. */
+ mb();
+
+ /* Check if device was successfully activated */
+ ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to activate device\n");
+ ret = -EFAULT;
+ goto err_disable_intr;
+ }
+
+ /* Register IB device */
+ ret = pvrdma_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IB device\n");
+ goto err_disable_intr;
+ }
+
+ dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
+ ret = register_netdevice_notifier(&dev->nb_netdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdevice events\n");
+ goto err_unreg_ibdev;
+ }
+
+ dev_info(&pdev->dev, "attached to device\n");
+ return 0;
+
+err_unreg_ibdev:
+ ib_unregister_device(&dev->ib_dev);
+err_disable_intr:
+ pvrdma_disable_intrs(dev);
+ kfree(dev->sgid_tbl);
+err_free_uar_table:
+ pvrdma_uar_table_cleanup(dev);
+err_free_intrs:
+ pvrdma_free_irq(dev);
+ pvrdma_disable_msi_all(dev);
+err_netdevice:
+ unregister_netdevice_notifier(&dev->nb_netdev);
+err_free_cq_ring:
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+err_free_async_ring:
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+err_free_slots:
+ pvrdma_free_slots(dev);
+err_free_dsr:
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
+err_uar_unmap:
+ iounmap(dev->driver_uar.map);
+err_unmap_regs:
+ iounmap(dev->regs);
+err_free_resource:
+ pci_release_regions(pdev);
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_free_device:
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+}
+
+static void pvrdma_pci_remove(struct pci_dev *pdev)
+{
+ struct pvrdma_dev *dev = pci_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ dev_info(&pdev->dev, "detaching from device\n");
+
+ unregister_netdevice_notifier(&dev->nb_netdev);
+ dev->nb_netdev.notifier_call = NULL;
+
+ flush_workqueue(event_wq);
+
+ /* Unregister ib device */
+ ib_unregister_device(&dev->ib_dev);
+
+ mutex_lock(&pvrdma_device_list_lock);
+ list_del(&dev->device_link);
+ mutex_unlock(&pvrdma_device_list_lock);
+
+ pvrdma_disable_intrs(dev);
+ pvrdma_free_irq(dev);
+ pvrdma_disable_msi_all(dev);
+
+ /* Deactivate pvrdma device */
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
+ pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+ pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+ pvrdma_free_slots(dev);
+
+ iounmap(dev->regs);
+ kfree(dev->sgid_tbl);
+ kfree(dev->cq_tbl);
+ kfree(dev->qp_tbl);
+ pvrdma_uar_table_cleanup(dev);
+ iounmap(dev->driver_uar.map);
+
+ ib_dealloc_device(&dev->ib_dev);
+
+ /* Free pci resources */
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id pvrdma_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);
+
+static struct pci_driver pvrdma_driver = {
+ .name = DRV_NAME,
+ .id_table = pvrdma_pci_table,
+ .probe = pvrdma_pci_probe,
+ .remove = pvrdma_pci_remove,
+};
+
+static int __init pvrdma_init(void)
+{
+ int err;
+
+ event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
+ if (!event_wq)
+ return -ENOMEM;
+
+ err = pci_register_driver(&pvrdma_driver);
+ if (err)
+ destroy_workqueue(event_wq);
+
+ return err;
+}
+
+static void __exit pvrdma_cleanup(void)
+{
+ pci_unregister_driver(&pvrdma_driver);
+
+ destroy_workqueue(event_wq);
+}
+
+module_init(pvrdma_init);
+module_exit(pvrdma_cleanup);
+
+MODULE_AUTHOR("VMware, Inc");
+MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
new file mode 100644
index 000000000000..948b5ccd2a70
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "pvrdma.h"
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+ u64 npages, bool alloc_pages)
+{
+ u64 i;
+
+ if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
+ return -EINVAL;
+
+ memset(pdir, 0, sizeof(*pdir));
+
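+ /*
+ * The directory is a single DMA page whose entries hold the DMA
+ * addresses of the table pages; each table page in turn holds the DMA
+ * addresses of the data pages.
+ */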
+ pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &pdir->dir_dma, GFP_KERNEL);
+ if (!pdir->dir)
+ goto err;
+
+ pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
+ pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
+ GFP_KERNEL);
+ if (!pdir->tables)
+ goto err;
+
+ for (i = 0; i < pdir->ntables; i++) {
+ pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ (dma_addr_t *)&pdir->dir[i],
+ GFP_KERNEL);
+ if (!pdir->tables[i])
+ goto err;
+ }
+
+ pdir->npages = npages;
+
+ if (alloc_pages) {
+ pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
+ GFP_KERNEL);
+ if (!pdir->pages)
+ goto err;
+
+ for (i = 0; i < pdir->npages; i++) {
+ dma_addr_t page_dma;
+
+ pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE,
+ &page_dma,
+ GFP_KERNEL);
+ if (!pdir->pages[i])
+ goto err;
+
+ pvrdma_page_dir_insert_dma(pdir, i, page_dma);
+ }
+ }
+
+ return 0;
+
+err:
+ pvrdma_page_dir_cleanup(dev, pdir);
+
+ return -ENOMEM;
+}
+
+static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
+}
+
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
+{
+ return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
+}
+
+static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->pages) {
+ u64 i;
+
+ for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
+ dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);
+
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->pages[i], page_dma);
+ }
+
+ kfree(pdir->pages);
+ }
+}
+
+static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->tables) {
+ int i;
+
+ pvrdma_page_dir_cleanup_pages(dev, pdir);
+
+ for (i = 0; i < pdir->ntables; i++) {
+ u64 *table = pdir->tables[i];
+
+ if (table)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ table, pdir->dir[i]);
+ }
+
+ kfree(pdir->tables);
+ }
+}
+
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+ struct pvrdma_page_dir *pdir)
+{
+ if (pdir->dir) {
+ pvrdma_page_dir_cleanup_tables(dev, pdir);
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ pdir->dir, pdir->dir_dma);
+ }
+}
+
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+ dma_addr_t daddr)
+{
+ u64 *table;
+
+ if (idx >= pdir->npages)
+ return -EINVAL;
+
+ table = pvrdma_page_dir_table(pdir, idx);
+ table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;
+
+ return 0;
+}
+
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+ struct ib_umem *umem, u64 offset)
+{
+ u64 i = offset;
+ int j, entry;
+ int ret = 0, len = 0;
+ struct scatterlist *sg;
+
+ if (offset >= pdir->npages)
+ return -EINVAL;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (j = 0; j < len; j++) {
+ dma_addr_t addr = sg_dma_address(sg) +
+ umem->page_size * j;
+
+ ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+ if (ret)
+ goto exit;
+
+ i++;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+ u64 *page_list,
+ int num_pages)
+{
+ int i;
+ int ret;
+
+ if (num_pages > pdir->npages)
+ return -EINVAL;
+
+ for (i = 0; i < num_pages; i++) {
+ ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
+{
+ dst->max_send_wr = src->max_send_wr;
+ dst->max_recv_wr = src->max_recv_wr;
+ dst->max_send_sge = src->max_send_sge;
+ dst->max_recv_sge = src->max_recv_sge;
+ dst->max_inline_data = src->max_inline_data;
+}
+
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
+{
+ BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+ memcpy(dst, src, sizeof(*src));
+}
+
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+ const struct pvrdma_global_route *src)
+{
+ pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+ const struct ib_global_route *src)
+{
+ ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
+ dst->flow_label = src->flow_label;
+ dst->sgid_index = src->sgid_index;
+ dst->hop_limit = src->hop_limit;
+ dst->traffic_class = src->traffic_class;
+}
+
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+ const struct pvrdma_ah_attr *src)
+{
+ pvrdma_global_route_to_ib(&dst->grh, &src->grh);
+ dst->dlid = src->dlid;
+ dst->sl = src->sl;
+ dst->src_path_bits = src->src_path_bits;
+ dst->static_rate = src->static_rate;
+ dst->ah_flags = src->ah_flags;
+ dst->port_num = src->port_num;
+ memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
+
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+ const struct ib_ah_attr *src)
+{
+ ib_global_route_to_pvrdma(&dst->grh, &src->grh);
+ dst->dlid = src->dlid;
+ dst->sl = src->sl;
+ dst->src_path_bits = src->src_path_bits;
+ dst->static_rate = src->static_rate;
+ dst->ah_flags = src->ah_flags;
+ dst->port_num = src->port_num;
+ memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
new file mode 100644
index 000000000000..8519f3212e52
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_get_dma_mr - get a DMA memory region
+ * @pd: protection domain
+ * @acc: access flags
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int ret;
+
+ /* Support only LOCAL_WRITE flag for DMA MRs */
+ if (acc & ~IB_ACCESS_LOCAL_WRITE) {
+ dev_warn(&dev->pdev->dev,
+ "unsupported dma mr access flags %#x\n", acc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = acc;
+ cmd->flags = PVRDMA_MR_FLAG_DMA;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not get DMA mem region, error: %d\n", ret);
+ kfree(mr);
+ return ERR_PTR(ret);
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+}
+
+/**
+ * pvrdma_reg_user_mr - register a userspace memory region
+ * @pd: protection domain
+ * @start: starting address
+ * @length: length of region
+ * @virt_addr: I/O virtual address
+ * @access_flags: access flags for memory region
+ * @udata: user data
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr = NULL;
+ struct ib_umem *umem;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int nchunks;
+ int ret;
+ int entry;
+ struct scatterlist *sg;
+
+ if (length == 0 || length > dev->dsr->caps.max_mr_size) {
+ dev_warn(&dev->pdev->dev, "invalid mem region length\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start,
+ length, access_flags, 0);
+ if (IS_ERR(umem)) {
+ dev_warn(&dev->pdev->dev,
+ "could not get umem for mem region\n");
+ return ERR_CAST(umem);
+ }
+
+ nchunks = 0;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
+ nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
+
+ if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
+ nchunks);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto err_umem;
+ }
+
+ mr->mmr.iova = virt_addr;
+ mr->mmr.size = length;
+ mr->umem = umem;
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
+ if (ret)
+ goto err_pdir;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->start = start;
+ cmd->length = length;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = access_flags;
+ cmd->nchunks = nchunks;
+ cmd->pdir_dma = mr->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not register mem region, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+
+ return &mr->ibmr;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+err_umem:
+ ib_umem_release(umem);
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_alloc_mr - allocate a memory region
+ * @pd: protection domain
+ * @mr_type: type of memory region
+ * @max_num_sg: maximum number of pages
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_user_mr *mr;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+ struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+ int size = max_num_sg * sizeof(u64);
+ int ret;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG ||
+ max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->pages = kzalloc(size, GFP_KERNEL);
+ if (!mr->pages) {
+ ret = -ENOMEM;
+ goto freemr;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate page dir for mr\n");
+ ret = -ENOMEM;
+ goto freepages;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->access_flags = 0;
+ cmd->flags = PVRDMA_MR_FLAG_FRMR;
+ cmd->nchunks = max_num_sg;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create FR mem region, error: %d\n", ret);
+ goto freepdir;
+ }
+
+ mr->max_pages = max_num_sg;
+ mr->mmr.mr_handle = resp->mr_handle;
+ mr->ibmr.lkey = resp->lkey;
+ mr->ibmr.rkey = resp->rkey;
+ mr->page_shift = PAGE_SHIFT;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+freepdir:
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+freepages:
+ kfree(mr->pages);
+freemr:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_dereg_mr - deregister a memory region
+ * @ibmr: memory region
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_dereg_mr(struct ib_mr *ibmr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
+ cmd->mr_handle = mr->mmr.mr_handle;
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "could not deregister mem region, error: %d\n", ret);
+
+ pvrdma_page_dir_cleanup(dev, &mr->pdir);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr->pages);
+ kfree(mr);
+
+ return 0;
+}
+
+static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+
+ if (mr->npages == mr->max_pages)
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+ return 0;
+}
+
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct pvrdma_user_mr *mr = to_vmr(ibmr);
+ struct pvrdma_dev *dev = to_vdev(ibmr->device);
+ int ret;
+
+ mr->npages = 0;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev, "could not map sg to pages\n");
+
+ return ret;
+}
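
Note: the user-MR path above sizes its page directory by walking the umem scatterlist and counting PAGE_SIZE chunks before issuing PVRDMA_CMD_CREATE_MR. The following standalone sketch (not part of the driver; the page size and directory limit are assumed stand-in values) shows the same counting and bounds check in isolation:

#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12			/* assume 4 KiB pages */
#define SKETCH_PDIR_MAX_PAGES	(512UL * 512UL)		/* stand-in for PVRDMA_PAGE_DIR_MAX_PAGES */

/* Sum the number of page-sized chunks over a list of DMA segment lengths. */
static long count_chunks(const unsigned long *seg_len, int nsegs)
{
	long nchunks = 0;
	int i;

	for (i = 0; i < nsegs; i++)
		nchunks += seg_len[i] >> SKETCH_PAGE_SHIFT;

	return nchunks;
}

int main(void)
{
	/* Hypothetical page-aligned segments: 16 KiB, 4 KiB and 64 KiB. */
	const unsigned long segs[] = { 16384, 4096, 65536 };
	long nchunks = count_chunks(segs, 3);

	if (nchunks <= 0 || nchunks > SKETCH_PDIR_MAX_PAGES)
		printf("region rejected (%ld chunks)\n", nchunks);
	else
		printf("page directory needs %ld entries\n", nchunks);
	return 0;
}
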
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
new file mode 100644
index 000000000000..c8c01e558125
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
+ struct pvrdma_cq **recv_cq)
+{
+ *send_cq = to_vcq(qp->ibqp.send_cq);
+ *recv_cq = to_vcq(qp->ibqp.recv_cq);
+}
+
+static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ __acquire(rcq->cq_lock);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+ spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
+ SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
+ spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
+ SINGLE_DEPTH_NESTING);
+ }
+}
+
+static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+ unsigned long *scq_flags,
+ unsigned long *rcq_flags)
+ __releases(scq->cq_lock) __releases(rcq->cq_lock)
+{
+ if (scq == rcq) {
+ __release(rcq->cq_lock);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else if (scq->cq_handle < rcq->cq_handle) {
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ } else {
+ spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+ spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+ }
+}
+
+static void pvrdma_reset_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq, *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* Clean up cqes */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ /*
+	 * Reset the queuepair. The NULL checks are needed because user-space
+	 * queuepairs have no kernel ring state.
+ */
+ if (qp->rq.ring) {
+ atomic_set(&qp->rq.ring->cons_head, 0);
+ atomic_set(&qp->rq.ring->prod_tail, 0);
+ }
+ if (qp->sq.ring) {
+ atomic_set(&qp->sq.ring->cons_head, 0);
+ atomic_set(&qp->sq.ring->prod_tail, 0);
+ }
+}
+
+static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
+ struct ib_qp_cap *req_cap,
+ struct pvrdma_qp *qp)
+{
+ if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
+ qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
+
+ /* Write back */
+ req_cap->max_recv_wr = qp->rq.wqe_cnt;
+ req_cap->max_recv_sge = qp->rq.max_sg;
+
+ qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->rq.max_sg);
+ qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
+
+ return 0;
+}
+
+static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
+ enum ib_qp_type type, struct pvrdma_qp *qp)
+{
+ if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
+ req_cap->max_send_sge > dev->dsr->caps.max_sge) {
+ dev_warn(&dev->pdev->dev, "send queue size invalid\n");
+ return -EINVAL;
+ }
+
+ qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
+ qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
+
+ /* Write back */
+ req_cap->max_send_wr = qp->sq.wqe_cnt;
+ req_cap->max_send_sge = qp->sq.max_sg;
+
+ qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
+ sizeof(struct pvrdma_sge) *
+ qp->sq.max_sg);
+ /* Note: one extra page for the header. */
+ qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
+ PAGE_SIZE - 1) / PAGE_SIZE;
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_qp - create queue pair
+ * @pd: protection domain
+ * @init_attr: queue pair attributes
+ * @udata: user data
+ *
+ * @return: the ib_qp pointer on success, otherwise returns an errno.
+ */
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_qp *qp = NULL;
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
+ struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_create_qp ucmd;
+ unsigned long flags;
+ int ret;
+
+ if (init_attr->create_flags) {
+ dev_warn(&dev->pdev->dev,
+ "invalid create queuepair flags %#x\n",
+ init_attr->create_flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI) {
+ dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
+ init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
+ return ERR_PTR(-ENOMEM);
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > pd->device->phys_port_cnt ||
+ udata) {
+ dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
+ ret = -EINVAL;
+ goto err_qp;
+ }
+ /* fall through */
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ ret = -ENOMEM;
+ goto err_qp;
+ }
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+ mutex_init(&qp->mutex);
+ atomic_set(&qp->refcnt, 1);
+ init_waitqueue_head(&qp->wait);
+
+ qp->state = IB_QPS_RESET;
+
+ if (pd->uobject && udata) {
+ dev_dbg(&dev->pdev->dev,
+ "create queuepair from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_qp;
+ }
+
+ /* set qp->sq.wqe_cnt, shift, buf_size.. */
+ qp->rumem = ib_umem_get(pd->uobject->context,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0, 0);
+ if (IS_ERR(qp->rumem)) {
+ ret = PTR_ERR(qp->rumem);
+ goto err_qp;
+ }
+
+ qp->sumem = ib_umem_get(pd->uobject->context,
+ ucmd.sbuf_addr,
+ ucmd.sbuf_size, 0, 0);
+ if (IS_ERR(qp->sumem)) {
+ ib_umem_release(qp->rumem);
+ ret = PTR_ERR(qp->sumem);
+ goto err_qp;
+ }
+
+ qp->npages_send = ib_umem_page_count(qp->sumem);
+ qp->npages_recv = ib_umem_page_count(qp->rumem);
+ qp->npages = qp->npages_send + qp->npages_recv;
+ } else {
+ qp->is_kernel = true;
+
+ ret = pvrdma_set_sq_size(to_vdev(pd->device),
+ &init_attr->cap,
+ init_attr->qp_type, qp);
+ if (ret)
+ goto err_qp;
+
+ ret = pvrdma_set_rq_size(to_vdev(pd->device),
+ &init_attr->cap, qp);
+ if (ret)
+ goto err_qp;
+
+ qp->npages = qp->npages_send + qp->npages_recv;
+
+ /* Skip header page. */
+ qp->sq.offset = PAGE_SIZE;
+
+ /* Recv queue pages are after send pages. */
+ qp->rq.offset = qp->npages_send * PAGE_SIZE;
+ }
+
+ if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in queuepair\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
+ qp->is_kernel);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ if (!qp->is_kernel) {
+ pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
+ pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
+ qp->npages_send);
+ } else {
+ /* Ring state is always the first page. */
+ qp->sq.ring = qp->pdir.pages[0];
+ qp->rq.ring = &qp->sq.ring[1];
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_qp;
+ }
+
+ /* Not supported */
+ init_attr->cap.max_inline_data = 0;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
+ cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+ cmd->max_send_wr = init_attr->cap.max_send_wr;
+ cmd->max_recv_wr = init_attr->cap.max_recv_wr;
+ cmd->max_send_sge = init_attr->cap.max_send_sge;
+ cmd->max_recv_sge = init_attr->cap.max_recv_sge;
+ cmd->max_inline_data = init_attr->cap.max_inline_data;
+ cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+ cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
+ cmd->total_chunks = qp->npages;
+ cmd->send_chunks = qp->npages_send - 1;
+ cmd->pdir_dma = qp->pdir.dir_dma;
+
+ dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
+ cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
+ cmd->max_recv_sge);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create queuepair, error: %d\n", ret);
+ goto err_pdir;
+ }
+
+ /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
+ qp->qp_handle = resp->qpn;
+ qp->port = init_attr->port_num;
+ qp->ibqp.qp_num = resp->qpn;
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ return &qp->ibqp;
+
+err_pdir:
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+err_umem:
+ if (pd->uobject && udata) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
+err_qp:
+ kfree(qp);
+ atomic_dec(&dev->num_qps);
+
+ return ERR_PTR(ret);
+}
+
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long flags, scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+ dev->qp_tbl[qp->qp_handle] = NULL;
+ spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ atomic_dec(&qp->refcnt);
+ wait_event(qp->wait, !atomic_read(&qp->refcnt));
+
+ pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+ kfree(qp);
+
+ atomic_dec(&dev->num_qps);
+}
+
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
+ cmd->qp_handle = vqp->qp_handle;
+
+ ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&to_vdev(qp->device)->pdev->dev,
+ "destroy queuepair failed, error: %d\n", ret);
+
+ pvrdma_free_qp(vqp);
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_qp - modify queue pair attributes
+ * @ibqp: the queue pair
+ * @attr: the new queue pair's attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
+ int cur_state, next_state;
+ int ret;
+
+	/* Sanity checking; the QP mutex serializes the state transition. */
+ mutex_lock(&qp->mutex);
+ cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
+ qp->state;
+ next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
+ attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_PORT) {
+ if (attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ if (attr->min_rnr_timer > 31) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (cur_state == next_state && cur_state == IB_QPS_RESET) {
+ ret = 0;
+ goto out;
+ }
+
+ qp->state = next_state;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+ cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
+ cmd->attrs.cur_qp_state =
+ ib_qp_state_to_pvrdma(attr->cur_qp_state);
+ cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
+ cmd->attrs.path_mig_state =
+ ib_mig_state_to_pvrdma(attr->path_mig_state);
+ cmd->attrs.qkey = attr->qkey;
+ cmd->attrs.rq_psn = attr->rq_psn;
+ cmd->attrs.sq_psn = attr->sq_psn;
+ cmd->attrs.dest_qp_num = attr->dest_qp_num;
+ cmd->attrs.qp_access_flags =
+ ib_access_flags_to_pvrdma(attr->qp_access_flags);
+ cmd->attrs.pkey_index = attr->pkey_index;
+ cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
+ cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
+ cmd->attrs.sq_draining = attr->sq_draining;
+ cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
+ cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+ cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
+ cmd->attrs.port_num = attr->port_num;
+ cmd->attrs.timeout = attr->timeout;
+ cmd->attrs.retry_cnt = attr->retry_cnt;
+ cmd->attrs.rnr_retry = attr->rnr_retry;
+ cmd->attrs.alt_port_num = attr->alt_port_num;
+ cmd->attrs.alt_timeout = attr->alt_timeout;
+ ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
+ ib_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
+ ib_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify queuepair, error: %d\n", ret);
+ } else if (rsp.hdr.err > 0) {
+ dev_warn(&dev->pdev->dev,
+ "cannot modify queuepair, error: %d\n", rsp.hdr.err);
+ ret = -EINVAL;
+ }
+
+ if (ret == 0 && next_state == IB_QPS_RESET)
+ pvrdma_reset_qp(qp);
+
+out:
+ mutex_unlock(&qp->mutex);
+
+ return ret;
+}
+
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->sq.offset + n * qp->sq.wqe_size);
+}
+
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+{
+ return pvrdma_page_dir_get_ptr(&qp->pdir,
+ qp->rq.offset + n * qp->rq.wqe_size);
+}
+
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
+{
+ struct pvrdma_user_mr *mr = to_vmr(wr->mr);
+
+ wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
+ wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
+ wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
+ wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
+ wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
+ wqe_hdr->wr.fast_reg.access_flags = wr->access;
+ wqe_hdr->wr.fast_reg.rkey = wr->key;
+
+ return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
+ mr->npages);
+}
+
+/**
+ * pvrdma_post_send - post send work request entries on a QP
+ * @ibqp: the QP
+ * @wr: work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise errno returned.
+ */
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_sq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int i, index;
+ int nreq;
+ int ret;
+
+ /*
+ * In states lower than RTS, we can fail immediately. In other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state < IB_QPS_RTS) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ unsigned int tail;
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send queue is full\n");
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "send SGE overflow\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(wr->opcode < 0)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid send opcode\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+		 * Only UD and RC queuepairs are supported here; a thorough
+		 * check would consult the opcode table below:
+ * opcode _UD _UC _RC
+ * _SEND x x x
+ * _SEND_WITH_IMM x x x
+ * _RDMA_WRITE x x
+ * _RDMA_WRITE_WITH_IMM x x
+ * _LOCAL_INV x x
+ * _SEND_WITH_INV x x
+ * _RDMA_READ x
+ * _ATOMIC_CMP_AND_SWP x
+ * _ATOMIC_FETCH_AND_ADD x
+ * _MASK_ATOMIC_CMP_AND_SWP x
+ * _MASK_ATOMIC_FETCH_AND_ADD x
+ * _REG_MR x
+ *
+ */
+ if (qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_RC &&
+ wr->opcode != IB_WR_SEND) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "unsupported queuepair type\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ } else if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid send opcode\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+ memset(wqe_hdr, 0, sizeof(*wqe_hdr));
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
+ wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
+ if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wqe_hdr->ex.imm_data = wr->ex.imm_data;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ if (unlikely(!ud_wr(wr)->ah)) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid address handle\n");
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Use qkey from qp context if high order bit set,
+ * otherwise from work request.
+ */
+ wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
+ wqe_hdr->wr.ud.remote_qkey =
+ ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey;
+ wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;
+
+ break;
+ case IB_QPT_RC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe_hdr->wr.rdma.remote_addr =
+ rdma_wr(wr)->remote_addr;
+ wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ case IB_WR_SEND_WITH_INV:
+ wqe_hdr->ex.invalidate_rkey =
+ wr->ex.invalidate_rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wqe_hdr->wr.atomic.remote_addr =
+ atomic_wr(wr)->remote_addr;
+ wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
+ wqe_hdr->wr.atomic.compare_add =
+ atomic_wr(wr)->compare_add;
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
+ wqe_hdr->wr.atomic.swap =
+ atomic_wr(wr)->swap;
+ break;
+ case IB_WR_REG_MR:
+ ret = set_reg_seg(wqe_hdr, reg_wr(wr));
+ if (ret < 0) {
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "Failed to set fast register work request\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "invalid queuepair type\n");
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ /* Need to check wqe_size 0 or max size */
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ index++;
+ if (unlikely(index >= qp->sq.wqe_cnt))
+ index = 0;
+ /* Update shared sq ring */
+ pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
+ qp->sq.wqe_cnt);
+ }
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ if (!ret)
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
+
+ return ret;
+}
+
+/**
+ * pvrdma_post_recv - post receive work request entries on a QP
+ * @ibqp: the QP
+ * @wr: the work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise errno returned.
+ */
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ unsigned long flags;
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ struct pvrdma_rq_wqe_hdr *wqe_hdr;
+ struct pvrdma_sge *sge;
+ int index, nreq;
+ int ret = 0;
+ int i;
+
+ /*
+ * In the RESET state, we can fail immediately. For other states,
+ * just post and let the device figure it out.
+ */
+ if (qp->state == IB_QPS_RESET) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ unsigned int tail;
+
+ if (unlikely(wr->num_sge > qp->rq.max_sg ||
+ wr->num_sge < 0)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv SGE overflow\n");
+ goto out;
+ }
+
+ if (unlikely(!pvrdma_idx_ring_has_space(
+ qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ dev_warn_ratelimited(&dev->pdev->dev,
+ "recv queue full\n");
+ goto out;
+ }
+
+ wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+ wqe_hdr->wr_id = wr->wr_id;
+ wqe_hdr->num_sge = wr->num_sge;
+ wqe_hdr->total_len = 0;
+
+ sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+ for (i = 0; i < wr->num_sge; i++) {
+ sge->addr = wr->sg_list[i].addr;
+ sge->length = wr->sg_list[i].length;
+ sge->lkey = wr->sg_list[i].lkey;
+ sge++;
+ }
+
+ /* Make sure wqe is written before index update */
+ smp_wmb();
+
+ index++;
+ if (unlikely(index >= qp->rq.wqe_cnt))
+ index = 0;
+ /* Update shared rq ring */
+ pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
+ qp->rq.wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
+
+ return ret;
+
+out:
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return ret;
+}
+
+/**
+ * pvrdma_query_qp - query a queue pair's attributes
+ * @ibqp: the queue pair to query
+ * @attr: the queue pair's attributes
+ * @attr_mask: attributes mask
+ * @init_attr: initial queue pair attributes
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibqp->device);
+ struct pvrdma_qp *qp = to_vqp(ibqp);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
+ struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
+ int ret = 0;
+
+ mutex_lock(&qp->mutex);
+
+ if (qp->state == IB_QPS_RESET) {
+ attr->qp_state = IB_QPS_RESET;
+ goto out;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
+ cmd->qp_handle = qp->qp_handle;
+ cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query queuepair, error: %d\n", ret);
+ goto out;
+ }
+
+ attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
+ attr->cur_qp_state =
+ pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
+ attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
+ attr->path_mig_state =
+ pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
+ attr->qkey = resp->attrs.qkey;
+ attr->rq_psn = resp->attrs.rq_psn;
+ attr->sq_psn = resp->attrs.sq_psn;
+ attr->dest_qp_num = resp->attrs.dest_qp_num;
+ attr->qp_access_flags =
+ pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
+ attr->pkey_index = resp->attrs.pkey_index;
+ attr->alt_pkey_index = resp->attrs.alt_pkey_index;
+ attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
+ attr->sq_draining = resp->attrs.sq_draining;
+ attr->max_rd_atomic = resp->attrs.max_rd_atomic;
+ attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
+ attr->min_rnr_timer = resp->attrs.min_rnr_timer;
+ attr->port_num = resp->attrs.port_num;
+ attr->timeout = resp->attrs.timeout;
+ attr->retry_cnt = resp->attrs.retry_cnt;
+ attr->rnr_retry = resp->attrs.rnr_retry;
+ attr->alt_port_num = resp->attrs.alt_port_num;
+ attr->alt_timeout = resp->attrs.alt_timeout;
+ pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
+ pvrdma_ah_attr_to_ib(&attr->ah_attr, &resp->attrs.ah_attr);
+ pvrdma_ah_attr_to_ib(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);
+
+ qp->state = attr->qp_state;
+
+ ret = 0;
+
+out:
+ attr->cur_qp_state = attr->qp_state;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->xrcd = NULL;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = 0;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = qp->port;
+
+ mutex_unlock(&qp->mutex);
+ return ret;
+}
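
Note: pvrdma_lock_cqs()/pvrdma_unlock_cqs() above avoid an AB-BA deadlock by always taking the CQ with the smaller handle first whenever the send and receive CQs differ. A minimal user-space sketch of the same ordering rule, using pthread mutexes in place of the driver's spinlocks (all names and handle values are illustrative):

#include <pthread.h>
#include <stdio.h>

struct sketch_cq {
	unsigned int handle;
	pthread_mutex_t lock;
};

static void lock_cq_pair(struct sketch_cq *scq, struct sketch_cq *rcq)
{
	if (scq == rcq) {
		pthread_mutex_lock(&scq->lock);
	} else if (scq->handle < rcq->handle) {
		pthread_mutex_lock(&scq->lock);
		pthread_mutex_lock(&rcq->lock);
	} else {
		pthread_mutex_lock(&rcq->lock);
		pthread_mutex_lock(&scq->lock);
	}
}

static void unlock_cq_pair(struct sketch_cq *scq, struct sketch_cq *rcq)
{
	if (scq == rcq) {
		pthread_mutex_unlock(&scq->lock);
	} else {
		/* Release in the reverse of the acquisition order. */
		pthread_mutex_unlock(scq->handle < rcq->handle ? &rcq->lock : &scq->lock);
		pthread_mutex_unlock(scq->handle < rcq->handle ? &scq->lock : &rcq->lock);
	}
}

int main(void)
{
	struct sketch_cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct sketch_cq b = { 2, PTHREAD_MUTEX_INITIALIZER };

	lock_cq_pair(&b, &a);	/* still locks handle 1 before handle 2 */
	unlock_cq_pair(&b, &a);
	printf("locked and unlocked in a consistent order\n");
	return 0;
}
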
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
new file mode 100644
index 000000000000..ed9022a91a1d
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_RING_H__
+#define __PVRDMA_RING_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_INVALID_IDX -1 /* Invalid index. */
+
+struct pvrdma_ring {
+ atomic_t prod_tail; /* Producer tail. */
+ atomic_t cons_head; /* Consumer head. */
+};
+
+struct pvrdma_ring_state {
+ struct pvrdma_ring tx; /* Tx ring. */
+ struct pvrdma_ring rx; /* Rx ring. */
+};
+
+static inline int pvrdma_idx_valid(__u32 idx, __u32 max_elems)
+{
+ /* Generates fewer instructions than a less-than. */
+ return (idx & ~((max_elems << 1) - 1)) == 0;
+}
+
+static inline __s32 pvrdma_idx(atomic_t *var, __u32 max_elems)
+{
+ const unsigned int idx = atomic_read(var);
+
+ if (pvrdma_idx_valid(idx, max_elems))
+ return idx & (max_elems - 1);
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline void pvrdma_idx_ring_inc(atomic_t *var, __u32 max_elems)
+{
+ __u32 idx = atomic_read(var) + 1; /* Increment. */
+
+ idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
+ atomic_set(var, idx);
+}
+
+static inline __s32 pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_tail)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_tail = tail & (max_elems - 1);
+ return tail != (head ^ max_elems);
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *out_head)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems)) {
+ *out_head = head & (max_elems - 1);
+ return tail != head;
+ }
+ return PVRDMA_INVALID_IDX;
+}
+
+static inline bool pvrdma_idx_ring_is_valid_idx(const struct pvrdma_ring *r,
+ __u32 max_elems, __u32 *idx)
+{
+ const __u32 tail = atomic_read(&r->prod_tail);
+ const __u32 head = atomic_read(&r->cons_head);
+
+ if (pvrdma_idx_valid(tail, max_elems) &&
+ pvrdma_idx_valid(head, max_elems) &&
+ pvrdma_idx_valid(*idx, max_elems)) {
+ if (tail > head && (*idx < tail && *idx >= head))
+ return true;
+ else if (head > tail && (*idx >= head || *idx < tail))
+ return true;
+ }
+ return false;
+}
+
+#endif /* __PVRDMA_RING_H__ */
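
Note: the ring helpers above keep the producer and consumer indices modulo 2 * max_elems, so the extra high bit acts as a generation flag: the ring is empty when tail == head, full when tail == (head ^ max_elems), and the actual slot is always idx & (max_elems - 1). A standalone sketch of that convention (the element count here is an assumed power of two, as the masking requires):

#include <stdio.h>

#define RING_ELEMS 8u	/* power of two, as required by the masking */

/* Advance an index modulo twice the ring size, flipping the generation bit on wrap. */
static unsigned int ring_inc(unsigned int idx)
{
	return (idx + 1) & (2 * RING_ELEMS - 1);
}

int main(void)
{
	unsigned int head = 0, tail = 0;
	unsigned int i;

	/* Produce RING_ELEMS entries: the ring becomes full, not empty. */
	for (i = 0; i < RING_ELEMS; i++)
		tail = ring_inc(tail);

	printf("slot of tail: %u\n", tail & (RING_ELEMS - 1));	/* 0 again */
	printf("full: %d\n", tail == (head ^ RING_ELEMS));	/* 1 */
	printf("empty: %d\n", tail == head);			/* 0 */
	return 0;
}
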
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
new file mode 100644
index 000000000000..54891370d18a
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/inet.h>
+#include <linux/io.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_query_device - query device
+ * @ibdev: the device to query
+ * @props: the device properties
+ * @uhw: user data
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+
+ props->fw_ver = dev->dsr->caps.fw_ver;
+ props->sys_image_guid = dev->dsr->caps.sys_image_guid;
+ props->max_mr_size = dev->dsr->caps.max_mr_size;
+ props->page_size_cap = dev->dsr->caps.page_size_cap;
+ props->vendor_id = dev->dsr->caps.vendor_id;
+ props->vendor_part_id = dev->pdev->device;
+ props->hw_ver = dev->dsr->caps.hw_ver;
+ props->max_qp = dev->dsr->caps.max_qp;
+ props->max_qp_wr = dev->dsr->caps.max_qp_wr;
+ props->device_cap_flags = dev->dsr->caps.device_cap_flags;
+ props->max_sge = dev->dsr->caps.max_sge;
+ props->max_cq = dev->dsr->caps.max_cq;
+ props->max_cqe = dev->dsr->caps.max_cqe;
+ props->max_mr = dev->dsr->caps.max_mr;
+ props->max_pd = dev->dsr->caps.max_pd;
+ props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom;
+ props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom;
+ props->atomic_cap =
+ dev->dsr->caps.atomic_ops &
+ (PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ props->max_ah = dev->dsr->caps.max_ah;
+ props->max_pkeys = dev->dsr->caps.max_pkeys;
+ props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay;
+ if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
+ (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ }
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_port - query device port attributes
+ * @ibdev: the device to query
+ * @port: the port number
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_port *cmd = &req.query_port;
+ struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
+ int err;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
+ cmd->port_num = port;
+
+ err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
+ if (err < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query port, error: %d\n", err);
+ return err;
+ }
+
+ memset(props, 0, sizeof(*props));
+
+ props->state = pvrdma_port_state_to_ib(resp->attrs.state);
+ props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
+ props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
+ props->gid_tbl_len = resp->attrs.gid_tbl_len;
+ props->port_cap_flags =
+ pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
+ props->max_msg_sz = resp->attrs.max_msg_sz;
+ props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
+ props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
+ props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
+ props->lid = resp->attrs.lid;
+ props->sm_lid = resp->attrs.sm_lid;
+ props->lmc = resp->attrs.lmc;
+ props->max_vl_num = resp->attrs.max_vl_num;
+ props->sm_sl = resp->attrs.sm_sl;
+ props->subnet_timeout = resp->attrs.subnet_timeout;
+ props->init_type_reply = resp->attrs.init_type_reply;
+ props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
+ props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
+ props->phys_state = resp->attrs.phys_state;
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_gid - query device gid
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @gid: the device gid value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+
+ if (index >= dev->dsr->caps.gid_tbl_len)
+ return -EINVAL;
+
+ memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid));
+
+ return 0;
+}
+
+/**
+ * pvrdma_query_pkey - query device port's P_Key table
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @pkey: the device P_Key value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ int err = 0;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
+ cmd->port_num = port;
+ cmd->index = index;
+
+ err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
+ PVRDMA_CMD_QUERY_PKEY_RESP);
+ if (err < 0) {
+ dev_warn(&to_vdev(ibdev)->pdev->dev,
+ "could not query pkey, error: %d\n", err);
+ return err;
+ }
+
+ *pkey = rsp.query_pkey_resp.pkey;
+
+ return 0;
+}
+
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u8 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props)
+{
+ unsigned long flags;
+
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ dev_warn(&to_vdev(ibdev)->pdev->dev,
+ "unsupported device modify mask %#x\n", mask);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock_irqsave(&to_vdev(ibdev)->desc_lock, flags);
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ spin_unlock_irqrestore(&to_vdev(ibdev)->desc_lock, flags);
+ }
+
+ if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ mutex_lock(&to_vdev(ibdev)->port_mutex);
+ to_vdev(ibdev)->sys_image_guid =
+ cpu_to_be64(props->sys_image_guid);
+ mutex_unlock(&to_vdev(ibdev)->port_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_port - modify device port attributes
+ * @ibdev: the device to modify
+ * @port: the port number
+ * @mask: attributes to modify
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct ib_port_attr attr;
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ int ret;
+
+ if (mask & ~IB_PORT_SHUTDOWN) {
+ dev_warn(&vdev->pdev->dev,
+ "unsupported port modify mask %#x\n", mask);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&vdev->port_mutex);
+ ret = pvrdma_query_port(ibdev, port, &attr);
+ if (ret)
+ goto out;
+
+ vdev->port_cap_mask |= props->set_port_cap_mask;
+ vdev->port_cap_mask &= ~props->clr_port_cap_mask;
+
+ if (mask & IB_PORT_SHUTDOWN)
+ vdev->ib_active = false;
+
+out:
+ mutex_unlock(&vdev->port_mutex);
+ return ret;
+}
+
+/**
+ * pvrdma_alloc_ucontext - allocate ucontext
+ * @ibdev: the IB device
+ * @udata: user data
+ *
+ * @return: the ib_ucontext pointer on success, otherwise errno.
+ */
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *vdev = to_vdev(ibdev);
+ struct pvrdma_ucontext *context;
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
+ struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
+ struct pvrdma_alloc_ucontext_resp uresp;
+ int ret;
+ void *ptr;
+
+ if (!vdev->ib_active)
+ return ERR_PTR(-EAGAIN);
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ context->dev = vdev;
+ ret = pvrdma_uar_alloc(vdev, &context->uar);
+ if (ret) {
+ kfree(context);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* get ctx_handle from host */
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->pfn = context->uar.pfn;
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
+ ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
+ if (ret < 0) {
+ dev_warn(&vdev->pdev->dev,
+ "could not create ucontext, error: %d\n", ret);
+ ptr = ERR_PTR(ret);
+ goto err;
+ }
+
+ context->ctx_handle = resp->ctx_handle;
+
+ /* copy back to user */
+ uresp.qp_tab_size = vdev->dsr->caps.max_qp;
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret) {
+ pvrdma_uar_free(vdev, &context->uar);
+ context->ibucontext.device = ibdev;
+ pvrdma_dealloc_ucontext(&context->ibucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return &context->ibucontext;
+
+err:
+ pvrdma_uar_free(vdev, &context->uar);
+ kfree(context);
+ return ptr;
+}
+
+/**
+ * pvrdma_dealloc_ucontext - deallocate ucontext
+ * @ibcontext: the ucontext
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
+ cmd->ctx_handle = context->ctx_handle;
+
+ ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&context->dev->pdev->dev,
+ "destroy ucontext failed, error: %d\n", ret);
+
+ /* Free the UAR even if the device command failed */
+ pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
+ kfree(context);
+
+ return ret;
+}
+
+/**
+ * pvrdma_mmap - create mmap region
+ * @ibcontext: the user context
+ * @vma: the VMA
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ dev_dbg(&context->dev->pdev->dev, "create mmap region\n");
+
+ if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
+ dev_warn(&context->dev->pdev->dev,
+ "invalid params for mmap region\n");
+ return -EINVAL;
+ }
+
+	/* Map the UAR page into the user VMA. Should VM_LOCKED be set? */
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * pvrdma_alloc_pd - allocate protection domain
+ * @ibdev: the IB device
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: the ib_pd protection domain pointer on success, otherwise errno.
+ */
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct pvrdma_pd *pd;
+ struct pvrdma_dev *dev = to_vdev(ibdev);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
+ struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+ int ret;
+ void *ptr;
+
+ /* Check allowed max pds */
+ if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
+ return ERR_PTR(-ENOMEM);
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ ptr = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
+ cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "failed to allocate protection domain, error: %d\n",
+ ret);
+ ptr = ERR_PTR(ret);
+ goto freepd;
+ }
+
+ pd->privileged = !context;
+ pd->pd_handle = resp->pd_handle;
+ pd->pdn = resp->pd_handle;
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back protection domain\n");
+ pvrdma_dealloc_pd(&pd->ibpd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ /* u32 pd handle */
+ return &pd->ibpd;
+
+freepd:
+ kfree(pd);
+err:
+ atomic_dec(&dev->num_pds);
+ return ptr;
+}
+
+/**
+ * pvrdma_dealloc_pd - deallocate protection domain
+ * @pd: the protection domain to be released
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_pd(struct ib_pd *pd)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret)
+ dev_warn(&dev->pdev->dev,
+ "could not dealloc protection domain, error: %d\n",
+ ret);
+
+ kfree(to_vpd(pd));
+ atomic_dec(&dev->num_pds);
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ * @udata: user data blob
+ *
+ * @return: the ib_ah pointer on success, otherwise errno.
+ */
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ struct pvrdma_ah *ah;
+ enum rdma_link_layer ll;
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ return ERR_PTR(-EINVAL);
+
+ ll = rdma_port_get_link_layer(pd->device, ah_attr->port_num);
+
+ if (ll != IB_LINK_LAYER_ETHERNET ||
+ rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw))
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
+ return ERR_PTR(-ENOMEM);
+
+ ah = kzalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah) {
+ atomic_dec(&dev->num_ahs);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ah->av.port_pd = to_vpd(pd)->pd_handle | (ah_attr->port_num << 24);
+ ah->av.src_path_bits = ah_attr->src_path_bits;
+ ah->av.src_path_bits |= 0x80;
+ ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.sl_tclass_flowlabel = (ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label;
+ memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+ memcpy(ah->av.dmac, ah_attr->dmac, 6);
+
+ ah->ibah.device = pd->device;
+ ah->ibah.pd = pd;
+ ah->ibah.uobject = NULL;
+
+ return &ah->ibah;
+}
+
+/**
+ * pvrdma_destroy_ah - destroy an address handle
+ * @ah: the address handle to destroy
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_ah(struct ib_ah *ah)
+{
+ struct pvrdma_dev *dev = to_vdev(ah->device);
+
+ kfree(to_vah(ah));
+ atomic_dec(&dev->num_ahs);
+
+ return 0;
+}
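
Note: pvrdma_create_ah() above packs the protection-domain handle and port number into one 32-bit word (port in the top byte) and the traffic class and flow label into another (traffic class shifted up by 20 bits). A standalone sketch of that packing and the matching unpacking, with arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pd_handle = 0x1234;		/* sample PD handle */
	uint8_t port_num = 1;			/* sample port */
	uint8_t traffic_class = 0x2a;		/* sample traffic class */
	uint32_t flow_label = 0xbeef;		/* sample 20-bit flow label */

	/* Same layout as av.port_pd and av.sl_tclass_flowlabel above. */
	uint32_t port_pd = pd_handle | ((uint32_t)port_num << 24);
	uint32_t sl_tclass_flowlabel = ((uint32_t)traffic_class << 20) | flow_label;

	printf("port_pd = %#x (pd %#x, port %u)\n",
	       port_pd, port_pd & 0x00ffffff, port_pd >> 24);
	printf("tclass = %#x, flow label = %#x\n",
	       sl_tclass_flowlabel >> 20, sl_tclass_flowlabel & 0xfffff);
	return 0;
}
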
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
new file mode 100644
index 000000000000..bfbe96b56255
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_VERBS_H__
+#define __PVRDMA_VERBS_H__
+
+#include <linux/types.h>
+
+union pvrdma_gid {
+ u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+enum pvrdma_link_layer {
+ PVRDMA_LINK_LAYER_UNSPECIFIED,
+ PVRDMA_LINK_LAYER_INFINIBAND,
+ PVRDMA_LINK_LAYER_ETHERNET,
+};
+
+enum pvrdma_mtu {
+ PVRDMA_MTU_256 = 1,
+ PVRDMA_MTU_512 = 2,
+ PVRDMA_MTU_1024 = 3,
+ PVRDMA_MTU_2048 = 4,
+ PVRDMA_MTU_4096 = 5,
+};
+
+static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
+{
+ switch (mtu) {
+ case PVRDMA_MTU_256: return 256;
+ case PVRDMA_MTU_512: return 512;
+ case PVRDMA_MTU_1024: return 1024;
+ case PVRDMA_MTU_2048: return 2048;
+ case PVRDMA_MTU_4096: return 4096;
+ default: return -1;
+ }
+}
+
+static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
+{
+ switch (mtu) {
+ case 256: return PVRDMA_MTU_256;
+ case 512: return PVRDMA_MTU_512;
+ case 1024: return PVRDMA_MTU_1024;
+ case 2048: return PVRDMA_MTU_2048;
+ case 4096:
+ default: return PVRDMA_MTU_4096;
+ }
+}
+
+enum pvrdma_port_state {
+ PVRDMA_PORT_NOP = 0,
+ PVRDMA_PORT_DOWN = 1,
+ PVRDMA_PORT_INIT = 2,
+ PVRDMA_PORT_ARMED = 3,
+ PVRDMA_PORT_ACTIVE = 4,
+ PVRDMA_PORT_ACTIVE_DEFER = 5,
+};
+
+enum pvrdma_port_cap_flags {
+ PVRDMA_PORT_SM = 1 << 1,
+ PVRDMA_PORT_NOTICE_SUP = 1 << 2,
+ PVRDMA_PORT_TRAP_SUP = 1 << 3,
+ PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
+ PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
+ PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
+ PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
+ PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
+ PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
+ PVRDMA_PORT_SM_DISABLED = 1 << 10,
+ PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
+ PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
+ PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
+ PVRDMA_PORT_CM_SUP = 1 << 16,
+ PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
+ PVRDMA_PORT_REINIT_SUP = 1 << 18,
+ PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
+ PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
+ PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
+ PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
+ PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
+ PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
+ PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
+ PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
+ PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
+};
+
+enum pvrdma_port_width {
+ PVRDMA_WIDTH_1X = 1,
+ PVRDMA_WIDTH_4X = 2,
+ PVRDMA_WIDTH_8X = 4,
+ PVRDMA_WIDTH_12X = 8,
+};
+
+static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
+{
+ switch (width) {
+ case PVRDMA_WIDTH_1X: return 1;
+ case PVRDMA_WIDTH_4X: return 4;
+ case PVRDMA_WIDTH_8X: return 8;
+ case PVRDMA_WIDTH_12X: return 12;
+ default: return -1;
+ }
+}
+
+enum pvrdma_port_speed {
+ PVRDMA_SPEED_SDR = 1,
+ PVRDMA_SPEED_DDR = 2,
+ PVRDMA_SPEED_QDR = 4,
+ PVRDMA_SPEED_FDR10 = 8,
+ PVRDMA_SPEED_FDR = 16,
+ PVRDMA_SPEED_EDR = 32,
+};
+
+struct pvrdma_port_attr {
+ enum pvrdma_port_state state;
+ enum pvrdma_mtu max_mtu;
+ enum pvrdma_mtu active_mtu;
+ u32 gid_tbl_len;
+ u32 port_cap_flags;
+ u32 max_msg_sz;
+ u32 bad_pkey_cntr;
+ u32 qkey_viol_cntr;
+ u16 pkey_tbl_len;
+ u16 lid;
+ u16 sm_lid;
+ u8 lmc;
+ u8 max_vl_num;
+ u8 sm_sl;
+ u8 subnet_timeout;
+ u8 init_type_reply;
+ u8 active_width;
+ u8 active_speed;
+ u8 phys_state;
+ u8 reserved[2];
+};
+
+struct pvrdma_global_route {
+ union pvrdma_gid dgid;
+ u32 flow_label;
+ u8 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
+ u8 reserved;
+};
+
+struct pvrdma_grh {
+ __be32 version_tclass_flow;
+ __be16 paylen;
+ u8 next_hdr;
+ u8 hop_limit;
+ union pvrdma_gid sgid;
+ union pvrdma_gid dgid;
+};
+
+enum pvrdma_ah_flags {
+ PVRDMA_AH_GRH = 1,
+};
+
+enum pvrdma_rate {
+ PVRDMA_RATE_PORT_CURRENT = 0,
+ PVRDMA_RATE_2_5_GBPS = 2,
+ PVRDMA_RATE_5_GBPS = 5,
+ PVRDMA_RATE_10_GBPS = 3,
+ PVRDMA_RATE_20_GBPS = 6,
+ PVRDMA_RATE_30_GBPS = 4,
+ PVRDMA_RATE_40_GBPS = 7,
+ PVRDMA_RATE_60_GBPS = 8,
+ PVRDMA_RATE_80_GBPS = 9,
+ PVRDMA_RATE_120_GBPS = 10,
+ PVRDMA_RATE_14_GBPS = 11,
+ PVRDMA_RATE_56_GBPS = 12,
+ PVRDMA_RATE_112_GBPS = 13,
+ PVRDMA_RATE_168_GBPS = 14,
+ PVRDMA_RATE_25_GBPS = 15,
+ PVRDMA_RATE_100_GBPS = 16,
+ PVRDMA_RATE_200_GBPS = 17,
+ PVRDMA_RATE_300_GBPS = 18,
+};
+
+struct pvrdma_ah_attr {
+ struct pvrdma_global_route grh;
+ u16 dlid;
+ u16 vlan_id;
+ u8 sl;
+ u8 src_path_bits;
+ u8 static_rate;
+ u8 ah_flags;
+ u8 port_num;
+ u8 dmac[6];
+ u8 reserved;
+};
+
+enum pvrdma_cq_notify_flags {
+ PVRDMA_CQ_SOLICITED = 1 << 0,
+ PVRDMA_CQ_NEXT_COMP = 1 << 1,
+ PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
+ PVRDMA_CQ_NEXT_COMP,
+ PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
+};
+
+struct pvrdma_qp_cap {
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+ u32 reserved;
+};
+
+enum pvrdma_sig_type {
+ PVRDMA_SIGNAL_ALL_WR,
+ PVRDMA_SIGNAL_REQ_WR,
+};
+
+enum pvrdma_qp_type {
+ PVRDMA_QPT_SMI,
+ PVRDMA_QPT_GSI,
+ PVRDMA_QPT_RC,
+ PVRDMA_QPT_UC,
+ PVRDMA_QPT_UD,
+ PVRDMA_QPT_RAW_IPV6,
+ PVRDMA_QPT_RAW_ETHERTYPE,
+ PVRDMA_QPT_RAW_PACKET = 8,
+ PVRDMA_QPT_XRC_INI = 9,
+ PVRDMA_QPT_XRC_TGT,
+ PVRDMA_QPT_MAX,
+};
+
+enum pvrdma_qp_create_flags {
+ PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
+ PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+};
+
+enum pvrdma_qp_attr_mask {
+ PVRDMA_QP_STATE = 1 << 0,
+ PVRDMA_QP_CUR_STATE = 1 << 1,
+ PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
+ PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
+ PVRDMA_QP_PKEY_INDEX = 1 << 4,
+ PVRDMA_QP_PORT = 1 << 5,
+ PVRDMA_QP_QKEY = 1 << 6,
+ PVRDMA_QP_AV = 1 << 7,
+ PVRDMA_QP_PATH_MTU = 1 << 8,
+ PVRDMA_QP_TIMEOUT = 1 << 9,
+ PVRDMA_QP_RETRY_CNT = 1 << 10,
+ PVRDMA_QP_RNR_RETRY = 1 << 11,
+ PVRDMA_QP_RQ_PSN = 1 << 12,
+ PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
+ PVRDMA_QP_ALT_PATH = 1 << 14,
+ PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
+ PVRDMA_QP_SQ_PSN = 1 << 16,
+ PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
+ PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
+ PVRDMA_QP_CAP = 1 << 19,
+ PVRDMA_QP_DEST_QPN = 1 << 20,
+ PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
+};
+
+enum pvrdma_qp_state {
+ PVRDMA_QPS_RESET,
+ PVRDMA_QPS_INIT,
+ PVRDMA_QPS_RTR,
+ PVRDMA_QPS_RTS,
+ PVRDMA_QPS_SQD,
+ PVRDMA_QPS_SQE,
+ PVRDMA_QPS_ERR,
+};
+
+enum pvrdma_mig_state {
+ PVRDMA_MIG_MIGRATED,
+ PVRDMA_MIG_REARM,
+ PVRDMA_MIG_ARMED,
+};
+
+enum pvrdma_mw_type {
+ PVRDMA_MW_TYPE_1 = 1,
+ PVRDMA_MW_TYPE_2 = 2,
+};
+
+struct pvrdma_qp_attr {
+ enum pvrdma_qp_state qp_state;
+ enum pvrdma_qp_state cur_qp_state;
+ enum pvrdma_mtu path_mtu;
+ enum pvrdma_mig_state path_mig_state;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 dest_qp_num;
+ u32 qp_access_flags;
+ u16 pkey_index;
+ u16 alt_pkey_index;
+ u8 en_sqd_async_notify;
+ u8 sq_draining;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 min_rnr_timer;
+ u8 port_num;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 alt_port_num;
+ u8 alt_timeout;
+ u8 reserved[5];
+ struct pvrdma_qp_cap cap;
+ struct pvrdma_ah_attr ah_attr;
+ struct pvrdma_ah_attr alt_ah_attr;
+};
+
+enum pvrdma_send_flags {
+ PVRDMA_SEND_FENCE = 1 << 0,
+ PVRDMA_SEND_SIGNALED = 1 << 1,
+ PVRDMA_SEND_SOLICITED = 1 << 2,
+ PVRDMA_SEND_INLINE = 1 << 3,
+ PVRDMA_SEND_IP_CSUM = 1 << 4,
+ PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
+};
+
+enum pvrdma_access_flags {
+ PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
+ PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
+ PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
+ PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
+ PVRDMA_ACCESS_MW_BIND = 1 << 4,
+ PVRDMA_ZERO_BASED = 1 << 5,
+ PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
+ PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
+};
+
+int pvrdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata);
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid);
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey);
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+ u8 port);
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props);
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
+ int mask, struct ib_port_modify *props);
+int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata);
+int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int pvrdma_dealloc_pd(struct ib_pd *ibpd);
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int pvrdma_dereg_mr(struct ib_mr *mr);
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
+ struct ib_udata *udata);
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct ib_udata *udata);
+int pvrdma_destroy_ah(struct ib_ah *ah);
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+#endif /* __PVRDMA_VERBS_H__ */
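Side note (not part of the patch): the enum/int helpers declared above are cheap, table-free conversions between the device's wire encodings and plain integers. A minimal sketch of how a caller might use them when reporting port properties; the function name is illustrative only.

/* Illustrative only -- example_active_port_info() is not in the driver. */
static void example_active_port_info(const struct pvrdma_port_attr *attr,
				     int *mtu_bytes, int *width_lanes)
{
	/* Both helpers return -1 for an encoding they do not recognize. */
	*mtu_bytes = pvrdma_mtu_enum_to_int(attr->active_mtu);
	*width_lanes = pvrdma_width_enum_to_int(
			(enum pvrdma_port_width)attr->active_width);
}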
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6d9904a4a0ab..7aa7a4e312f1 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -119,18 +119,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED &&
(solicited || entry->status != IB_WC_SUCCESS))) {
- struct kthread_worker *worker;
/*
* This will cause send_complete() to be called in
* another thread.
*/
- smp_read_barrier_depends(); /* see rvt_cq_exit */
- worker = cq->rdi->worker;
- if (likely(worker)) {
+ spin_lock(&cq->rdi->n_cqs_lock);
+ if (likely(cq->rdi->worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- kthread_queue_work(worker, &cq->comptask);
+ kthread_queue_work(cq->rdi->worker, &cq->comptask);
}
+ spin_unlock(&cq->rdi->n_cqs_lock);
}
spin_unlock_irqrestore(&cq->lock, flags);
@@ -240,15 +239,15 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
}
}
- spin_lock(&rdi->n_cqs_lock);
+ spin_lock_irq(&rdi->n_cqs_lock);
if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
}
rdi->n_cqs_allocated++;
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
if (cq->ip) {
spin_lock_irq(&rdi->pending_lock);
@@ -296,9 +295,9 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
struct rvt_dev_info *rdi = cq->rdi;
kthread_flush_work(&cq->comptask);
- spin_lock(&rdi->n_cqs_lock);
+ spin_lock_irq(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
- spin_unlock(&rdi->n_cqs_lock);
+ spin_unlock_irq(&rdi->n_cqs_lock);
if (cq->ip)
kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
@@ -504,33 +503,23 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
*/
int rvt_driver_cq_init(struct rvt_dev_info *rdi)
{
- int ret = 0;
int cpu;
- struct task_struct *task;
+ struct kthread_worker *worker;
if (rdi->worker)
return 0;
+
spin_lock_init(&rdi->n_cqs_lock);
- rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
- if (!rdi->worker)
- return -ENOMEM;
- kthread_init_worker(rdi->worker);
- task = kthread_create_on_node(
- kthread_worker_fn,
- rdi->worker,
- rdi->dparms.node,
- "%s", rdi->dparms.cq_name);
- if (IS_ERR(task)) {
- kfree(rdi->worker);
- rdi->worker = NULL;
- return PTR_ERR(task);
- }
- set_user_nice(task, MIN_NICE);
cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
- kthread_bind(task, cpu);
- wake_up_process(task);
- return ret;
+ worker = kthread_create_worker_on_cpu(cpu, 0,
+ "%s", rdi->dparms.cq_name);
+ if (IS_ERR(worker))
+ return PTR_ERR(worker);
+
+ set_user_nice(worker->task, MIN_NICE);
+ rdi->worker = worker;
+ return 0;
}
/**
@@ -541,13 +530,15 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
+ /* block future queuing from send_complete() */
+ spin_lock_irq(&rdi->n_cqs_lock);
worker = rdi->worker;
- if (!worker)
+ if (!worker) {
+ spin_unlock_irq(&rdi->n_cqs_lock);
return;
- /* blocks future queuing from send_complete() */
+ }
rdi->worker = NULL;
- smp_wmb(); /* See rdi_cq_enter */
- kthread_flush_worker(worker);
- kthread_stop(worker->task);
- kfree(worker);
+ spin_unlock_irq(&rdi->n_cqs_lock);
+
+ kthread_destroy_worker(worker);
}
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 983d319ac976..05c8c2afb0e3 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -81,7 +81,7 @@ static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
goto bail;
mqp->qp = qp;
- atomic_inc(&qp->refcount);
+ rvt_get_qp(qp);
bail:
return mqp;
@@ -92,8 +92,7 @@ static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
struct rvt_qp *qp = mqp->qp;
/* Notify hfi1_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rvt_put_qp(qp);
kfree(mqp);
}
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 46b64970058e..52fd15276ee6 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -51,6 +51,7 @@
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
+#include "trace.h"
/**
* rvt_driver_mr_init - Init MR resources per driver
@@ -84,6 +85,7 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
lkey_table_size = rdi->dparms.lkey_table_size;
}
rdi->lkey_table.max = 1 << lkey_table_size;
+ rdi->lkey_table.shift = 32 - lkey_table_size;
lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
rdi->lkey_table.table = (struct rvt_mregion __rcu **)
vmalloc_node(lk_tab_size, rdi->dparms.node);
@@ -402,6 +404,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
mr->mr.map[m]->segs[n].length = umem->page_size;
+ trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
n++;
if (n == RVT_SEGSZ) {
m++;
@@ -506,6 +509,7 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
mr->mr.map[m]->segs[n].length = ps;
+ trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
mr->mr.length += ps;
return 0;
@@ -692,6 +696,7 @@ int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
for (i = 0; i < list_len; i++) {
fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
+ trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
if (++n == RVT_SEGSZ) {
m++;
n = 0;
@@ -774,7 +779,6 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct rvt_mregion *mr;
unsigned n, m;
size_t off;
- struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
/*
* We use LKEY == zero for kernel virtual addresses
@@ -782,12 +786,14 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
*/
rcu_read_lock();
if (sge->lkey == 0) {
+ struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
if (pd->user)
goto bail;
mr = rcu_dereference(dev->dma_mr);
if (!mr)
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
isge->mr = mr;
@@ -798,8 +804,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
isge->n = 0;
goto ok;
}
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
+ mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
@@ -809,7 +814,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc))
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
off += mr->offset;
@@ -887,7 +892,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
mr = rcu_dereference(rdi->dma_mr);
if (!mr)
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
sge->mr = mr;
@@ -899,8 +904,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
goto ok;
}
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
@@ -909,7 +913,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
- atomic_inc(&mr->refcount);
+ rvt_get_mr(mr);
rcu_read_unlock();
off += mr->offset;
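The lkey/rkey hunks above cache the shift (32 - lkey_table_size) in the lkey table at init time, so the hot lookup path no longer needs to reach back into rvt_dev_info. A small sketch of the index computation before and after; the function names are illustrative:

#include <linux/types.h>

/* Before: recompute the shift from the device parameters on every lookup. */
static inline u32 example_lkey_index_old(u32 lkey, u32 lkey_table_size)
{
	return lkey >> (32 - lkey_table_size);
}

/* After: use the shift cached in rvt_lkey_table when the table was sized. */
static inline u32 example_lkey_index_new(u32 lkey, u32 cached_shift)
{
	return lkey >> cached_shift;
}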
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 6500c3b5a89c..2a13ac660f2b 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -76,6 +76,23 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
};
EXPORT_SYMBOL(ib_rvt_state_ops);
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+ [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [IB_WR_REG_MR] = IB_WC_REG_MR
+};
+EXPORT_SYMBOL(ib_rvt_wc_opcode);
+
static void get_map_page(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map,
gfp_t gfp)
@@ -884,7 +901,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
return ret;
bail_ip:
- kref_put(&qp->ip->ref, rvt_release_mmap_info);
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
bail_qpn:
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index 6c0457db5499..e2d23acb6a7d 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,143 +45,10 @@
*
*/
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR rdmavt
-
-#if !defined(__RDMAVT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __RDMAVT_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
-
#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rdmavt
-
-TRACE_EVENT(rvt_dbg,
- TP_PROTO(struct rvt_dev_info *rdi,
- const char *msg),
- TP_ARGS(rdi, msg),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(rdi)
- __string(msg, msg)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(rdi);
- __assign_str(msg, msg);
- ),
- TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_qphash
-DECLARE_EVENT_CLASS(rvt_qphash_template,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, bucket)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->bucket = bucket;
- ),
- TP_printk(
- "[%s] qpn 0x%x bucket %u",
- __get_str(dev),
- __entry->qpn,
- __entry->bucket
- )
-);
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
- TP_PROTO(struct rvt_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_tx
-
-#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
-#define show_wr_opcode(opcode) \
-__print_symbolic(opcode, \
- wr_opcode_name(RDMA_WRITE), \
- wr_opcode_name(RDMA_WRITE_WITH_IMM), \
- wr_opcode_name(SEND), \
- wr_opcode_name(SEND_WITH_IMM), \
- wr_opcode_name(RDMA_READ), \
- wr_opcode_name(ATOMIC_CMP_AND_SWP), \
- wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
- wr_opcode_name(LSO), \
- wr_opcode_name(SEND_WITH_INV), \
- wr_opcode_name(RDMA_READ_WITH_INV), \
- wr_opcode_name(LOCAL_INV), \
- wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
- wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
-
-#define POS_PRN \
-"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
-
-TRACE_EVENT(
- rvt_post_one_wr,
- TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
- TP_ARGS(qp, wqe),
- TP_STRUCT__entry(
- RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
- __field(u64, wr_id)
- __field(u32, qpn)
- __field(u32, psn)
- __field(u32, lpsn)
- __field(u32, length)
- __field(u32, opcode)
- __field(u32, size)
- __field(u32, avail)
- __field(u32, head)
- __field(u32, last)
- ),
- TP_fast_assign(
- RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
- __entry->wr_id = wqe->wr.wr_id;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->psn = wqe->psn;
- __entry->lpsn = wqe->lpsn;
- __entry->length = wqe->length;
- __entry->opcode = wqe->wr.opcode;
- __entry->size = qp->s_size;
- __entry->avail = qp->s_avail;
- __entry->head = qp->s_head;
- __entry->last = qp->s_last;
- ),
- TP_printk(
- POS_PRN,
- __get_str(dev),
- __entry->wr_id,
- __entry->qpn,
- __entry->psn,
- __entry->lpsn,
- __entry->length,
- __entry->opcode, show_wr_opcode(__entry->opcode),
- __entry->size,
- __entry->avail,
- __entry->head,
- __entry->last
- )
-);
-
-#endif /* __RDMAVT_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
+#include "trace_rvt.h"
+#include "trace_qp.h"
+#include "trace_tx.h"
+#include "trace_mr.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
new file mode 100644
index 000000000000..3318a6c36373
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_mr.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_mr
+DECLARE_EVENT_CLASS(
+ rvt_mr_template,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
+ __field(void *, vaddr)
+ __field(struct page *, page)
+ __field(size_t, len)
+ __field(u32, lkey)
+ __field(u16, m)
+ __field(u16, n)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
+ __entry->vaddr = v;
+ __entry->page = virt_to_page(v);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->len = len;
+ ),
+ TP_printk(
+ "[%s] vaddr %p page %p m %u n %u len %ld",
+ __get_str(dev),
+ __entry->vaddr,
+ __entry->page,
+ __entry->m,
+ __entry->n,
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_page_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_fmr_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+ rvt_mr_template, rvt_mr_user_seg,
+ TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+ TP_ARGS(mr, m, n, v, len));
+
+#endif /* __RVT_TRACE_MR_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mr
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
new file mode 100644
index 000000000000..4c77a3119bda
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_QP_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qp
+
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+
+#endif /* __RVT_TRACE_QP_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_qp
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_rvt.h b/drivers/infiniband/sw/rdmavt/trace_rvt.h
new file mode 100644
index 000000000000..746f33461d9a
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_rvt.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_RVT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_RVT_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt
+
+TRACE_EVENT(rvt_dbg,
+ TP_PROTO(struct rvt_dev_info *rdi,
+ const char *msg),
+ TP_ARGS(rdi, msg),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(rdi)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(rdi);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#endif /* __RVT_TRACE_RVT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rvt
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
new file mode 100644
index 000000000000..0e03173662d8
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
+#define show_wr_opcode(opcode) \
+__print_symbolic(opcode, \
+ wr_opcode_name(RDMA_WRITE), \
+ wr_opcode_name(RDMA_WRITE_WITH_IMM), \
+ wr_opcode_name(SEND), \
+ wr_opcode_name(SEND_WITH_IMM), \
+ wr_opcode_name(RDMA_READ), \
+ wr_opcode_name(ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(LSO), \
+ wr_opcode_name(SEND_WITH_INV), \
+ wr_opcode_name(RDMA_READ_WITH_INV), \
+ wr_opcode_name(LOCAL_INV), \
+ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
+
+#define POS_PRN \
+"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
+
+TRACE_EVENT(
+ rvt_post_one_wr,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
+ TP_ARGS(qp, wqe),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, length)
+ __field(u32, opcode)
+ __field(u32, size)
+ __field(u32, avail)
+ __field(u32, head)
+ __field(u32, last)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->psn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ __entry->length = wqe->length;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->size = qp->s_size;
+ __entry->avail = qp->s_avail;
+ __entry->head = qp->s_head;
+ __entry->last = qp->s_last;
+ ),
+ TP_printk(
+ POS_PRN,
+ __get_str(dev),
+ __entry->wr_id,
+ __entry->qpn,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->length,
+ __entry->opcode, show_wr_opcode(__entry->opcode),
+ __entry->size,
+ __entry->avail,
+ __entry->head,
+ __entry->last
+ )
+);
+
+#endif /* __RVT_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6c5e29db88e3..d369f24425f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -224,7 +224,7 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
else
return COMPST_DONE;
} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
- return COMPST_ERROR_RETRY;
+ return COMPST_DONE;
} else {
return COMPST_CHECK_ACK;
}
@@ -420,11 +420,12 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
(qp->req.state == QP_STATE_ERROR)) {
make_send_cqe(qp, wqe, &cqe);
+ advance_consumer(qp->sq.queue);
rxe_cq_post(qp->scq, &cqe, 0);
+ } else {
+ advance_consumer(qp->sq.queue);
}
- advance_consumer(qp->sq.queue);
-
/*
* we completed something so let req run again
* if it is trying to fence
@@ -510,6 +511,8 @@ int rxe_completer(void *arg)
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
+ rxe_add_ref(qp);
+
if (!qp->valid) {
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
@@ -739,11 +742,13 @@ exit:
/* we come here if we are done with processing and want the task to
* exit from the loop calling us
*/
+ rxe_drop_ref(qp);
return -EAGAIN;
done:
/* we come here if we have processed a packet we want the task to call
* us again to see if there is anything else to do
*/
+ rxe_drop_ref(qp);
return 0;
}
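The rxe_comp.c hunks above (and the matching ones in rxe_req.c and rxe_resp.c later in this patch) pin the QP with rxe_add_ref()/rxe_drop_ref() for the duration of one task invocation, so the QP cannot be freed while the tasklet body is still running. A bare sketch of that pattern; example_task() and do_work() are hypothetical names, not driver code:

static int example_task(void *arg)
{
	struct rxe_qp *qp = arg;
	int ret;

	rxe_add_ref(qp);	/* hold the QP across the whole run */
	ret = do_work(qp);	/* hypothetical per-QP processing */
	rxe_drop_ref(qp);	/* may be the final put once teardown has started */

	return ret;
}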
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 73849a5a91b3..efe4c6a35442 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -266,8 +266,6 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- atomic_inc(&qp->skb_out);
-
if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1869152f1d23..d0faca294006 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -355,6 +355,9 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
size_t offset;
u32 crc = crcp ? (*crcp) : 0;
+ if (length == 0)
+ return 0;
+
if (mem->type == RXE_MEM_TYPE_DMA) {
u8 *src, *dest;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index ffff5a54cb34..342e78163613 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -46,7 +46,7 @@
#include "rxe_loc.h"
static LIST_HEAD(rxe_dev_list);
-static spinlock_t dev_list_lock; /* spinlock for device list */
+static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
@@ -455,6 +455,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return -EAGAIN;
}
+ atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
return 0;
@@ -659,8 +660,6 @@ struct notifier_block rxe_net_notifier = {
int rxe_net_ipv4_init(void)
{
- spin_lock_init(&dev_list_lock);
-
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
@@ -676,8 +675,6 @@ int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
- spin_lock_init(&dev_list_lock);
-
recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), true);
if (IS_ERR(recv_sockets.sk6)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f459c43a77c8..13ed2cc6eaa2 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -82,7 +82,7 @@ enum rxe_device_param {
RXE_MAX_SGE = 32,
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
- RXE_MAX_LOG_CQE = 13,
+ RXE_MAX_LOG_CQE = 15,
RXE_MAX_MR = 2 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6bac0717c540..d723947a8542 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -180,7 +180,6 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
pool->table = kmalloc(size, GFP_KERNEL);
if (!pool->table) {
- pr_warn("no memory for bit table\n");
err = -ENOMEM;
goto out;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c3e60e4bde6e..486d576e55bc 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -855,4 +855,5 @@ void rxe_qp_cleanup(void *arg)
free_rd_atomic_resources(qp);
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 46f062842a9a..252b4d637d45 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -391,16 +391,15 @@ int rxe_rcv(struct sk_buff *skb)
payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
- char saddr[sizeof(struct in6_addr)];
-
if (skb->protocol == htons(ETH_P_IPV6))
- sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+ pr_warn_ratelimited("bad ICRC from %pI6c\n",
+ &ipv6_hdr(skb)->saddr);
else if (skb->protocol == htons(ETH_P_IP))
- sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+ pr_warn_ratelimited("bad ICRC from %pI4\n",
+ &ip_hdr(skb)->saddr);
else
- sprintf(saddr, "unknown");
+ pr_warn_ratelimited("bad ICRC from unknown\n");
- pr_warn_ratelimited("bad ICRC from %s\n", saddr);
goto drop;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 22bd9630dcd9..73d4a97603a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
static void save_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
- struct rxe_qp *rollback_qp)
+ u32 *rollback_psn)
{
rollback_wqe->state = wqe->state;
rollback_wqe->first_psn = wqe->first_psn;
rollback_wqe->last_psn = wqe->last_psn;
- rollback_qp->req.psn = qp->req.psn;
+ *rollback_psn = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
struct rxe_qp *qp,
struct rxe_send_wqe *rollback_wqe,
- struct rxe_qp *rollback_qp)
+ u32 rollback_psn)
{
wqe->state = rollback_wqe->state;
wqe->first_psn = rollback_wqe->first_psn;
wqe->last_psn = rollback_wqe->last_psn;
- qp->req.psn = rollback_qp->req.psn;
+ qp->req.psn = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,10 @@ int rxe_requester(void *arg)
int mtu;
int opcode;
int ret;
- struct rxe_qp rollback_qp;
struct rxe_send_wqe rollback_wqe;
+ u32 rollback_psn;
+
+ rxe_add_ref(qp);
next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -697,6 +699,7 @@ next_wqe:
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
__rxe_do_task(&qp->comp.task);
+ rxe_drop_ref(qp);
return 0;
}
payload = mtu;
@@ -719,7 +722,7 @@ next_wqe:
* rxe_xmit_packet().
* Otherwise, completer might initiate an unjustified retry flow.
*/
- save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -727,7 +730,7 @@ next_wqe:
qp->need_req_skb = 1;
kfree_skb(skb);
- rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
if (ret == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
@@ -756,8 +759,7 @@ err:
*/
wqe->wr.send_flags |= IB_SEND_SIGNALED;
__rxe_do_task(&qp->comp.task);
- return -EAGAIN;
-
exit:
+ rxe_drop_ref(qp);
return -EAGAIN;
}
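The save_state()/rollback_state() change above snapshots only the 32-bit request PSN instead of copying an entire struct rxe_qp onto the kernel stack, which was both wasteful and fragile. Illustrative equivalents of the new save/restore path (names are not from the patch):

static void example_save_psn(const struct rxe_qp *qp, u32 *rollback_psn)
{
	*rollback_psn = qp->req.psn;
}

static void example_restore_psn(struct rxe_qp *qp, u32 rollback_psn)
{
	qp->req.psn = rollback_psn;
}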
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dd3d88adc003..3435efff8799 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -444,6 +444,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
return RESPST_EXECUTE;
}
+ /* A zero-byte op is not required to set an addr or rkey. */
+ if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+ (pkt->mask & RXE_RETH_MASK) &&
+ reth_len(pkt) == 0) {
+ return RESPST_EXECUTE;
+ }
+
va = qp->resp.va;
rkey = qp->resp.rkey;
resid = qp->resp.resid;
@@ -680,9 +687,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
res->read.va_org = qp->resp.va;
res->first_psn = req_pkt->psn;
- res->last_psn = req_pkt->psn +
- (reth_len(req_pkt) + mtu - 1) /
- mtu - 1;
+
+ if (reth_len(req_pkt)) {
+ res->last_psn = (req_pkt->psn +
+ (reth_len(req_pkt) + mtu - 1) /
+ mtu - 1) & BTH_PSN_MASK;
+ } else {
+ res->last_psn = res->first_psn;
+ }
res->cur_psn = req_pkt->psn;
res->read.resid = qp->resp.resid;
@@ -742,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
} else {
qp->resp.res = NULL;
qp->resp.opcode = -1;
- qp->resp.psn = res->cur_psn;
+ if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
+ qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
}
@@ -1057,12 +1070,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
+ u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
rc = RESPST_CLEANUP;
goto out;
} else if (pkt->mask & RXE_READ_MASK) {
@@ -1132,6 +1146,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
pkt, skb_copy);
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+ rxe_drop_ref(qp);
kfree_skb(skb_copy);
rc = RESPST_CLEANUP;
goto out;
@@ -1198,6 +1213,8 @@ int rxe_responder(void *arg)
struct rxe_pkt_info *pkt = NULL;
int ret = 0;
+ rxe_add_ref(qp);
+
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
if (!qp->valid) {
@@ -1386,5 +1403,6 @@ int rxe_responder(void *arg)
exit:
ret = -EAGAIN;
done:
+ rxe_drop_ref(qp);
return ret;
}
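The read_reply() hunk above special-cases zero-length RDMA reads and masks the computed PSN with BTH_PSN_MASK. For reference, the packet-count arithmetic it encodes, written as a stand-alone helper with an illustrative name:

/* Last response PSN for a read of 'len' bytes at path MTU 'mtu' (> 0). */
static u32 example_read_last_psn(u32 first_psn, u32 len, u32 mtu)
{
	u32 npkts = len ? (len + mtu - 1) / mtu : 1;	/* 0-byte read => 1 pkt */

	return (first_psn + npkts - 1) & BTH_PSN_MASK;
}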
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 2a6e3cd2d4e8..efc832a2d7c6 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -169,7 +169,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
}
}
- err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+ err = rxe_queue_resize(q, &attr->max_wr,
rcv_wqe_size(srq->rq.max_sge),
srq->rq.queue->ip ?
srq->rq.queue->ip->context :
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1e19bf828a6e..d2a14a1bdc7f 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -121,6 +121,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
task->arg = arg;
task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name);
+ task->destroyed = false;
tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
@@ -132,11 +133,29 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
+ unsigned long flags;
+ bool idle;
+
+ /*
+ * Mark the task, then wait for it to finish. It might be
+ * running in a non-tasklet (direct call) context.
+ */
+ task->destroyed = true;
+
+ do {
+ spin_lock_irqsave(&task->state_lock, flags);
+ idle = (task->state == TASK_STATE_START);
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ } while (!idle);
+
tasklet_kill(&task->tasklet);
}
void rxe_run_task(struct rxe_task *task, int sched)
{
+ if (task->destroyed)
+ return;
+
if (sched)
tasklet_schedule(&task->tasklet);
else
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index d14aa6daed05..08ff42d451c6 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -54,6 +54,7 @@ struct rxe_task {
int (*func)(void *arg);
int ret;
char name[16];
+ bool destroyed;
};
/*
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 19841c863daf..beb7021ff18a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
return err;
}
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+ struct ib_udata *udata)
+
{
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -564,7 +566,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
- goto err1;
+ goto err2;
}
qp->is_user = 1;
}
@@ -573,12 +575,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
if (err)
- goto err2;
+ goto err3;
return &qp->ibqp;
-err2:
+err3:
rxe_drop_index(qp);
+err2:
rxe_drop_ref(qp);
err1:
return ERR_PTR(err);
@@ -1007,11 +1010,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+ spin_lock_irqsave(&cq->cq_lock, irq_flags);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- return 0;
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+
+ return ret;
}
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 339a1eecdfe3..096c4f6fbd65 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -357,11 +357,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int i;
rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
- if (!rx->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
+ if (!rx->rx_ring)
return -ENOMEM;
- }
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
@@ -1054,8 +1051,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
- ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
- priv->ca->name);
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
@@ -1134,7 +1129,6 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
GFP_NOIO, PAGE_KERNEL);
if (!p->tx_ring) {
- ipoib_warn(priv, "failed to allocate tx ring\n");
ret = -ENOMEM;
goto err_tx;
}
@@ -1550,8 +1544,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
- printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
- priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 830fecb6934c..5038f9d2d753 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -416,11 +416,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
- if (!qp_work) {
- ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
- __func__, priv->qp->qp_num);
+ if (!qp_work)
return;
- }
INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
qp_work->priv = priv;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c50794fb92db..3ce0765a05ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1619,11 +1619,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
- if (!priv->rx_ring) {
- printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
- ca->name, ipoib_recvq_size);
+ if (!priv->rx_ring)
goto out;
- }
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1909dd252c94..fddff403d5d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
- port_attr.state != IB_PORT_ACTIVE) {
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index a2f9f29c6ab5..fd811115af49 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -35,7 +35,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "ipoib.h"
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 64b3d11dcf1e..9104e6b8cac9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -62,7 +62,7 @@
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a4b791dfaa1d..8ae7a3beddb7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -890,11 +890,14 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id, event->param.conn.private_data);
break;
+ case RDMA_CM_EVENT_REJECTED:
+ iser_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- case RDMA_CM_EVENT_REJECTED:
iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6dd43f63238e..314e95516068 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -184,7 +184,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!isert_conn->rx_descs)
- goto fail;
+ return -ENOMEM;
rx_desc = isert_conn->rx_descs;
@@ -213,9 +213,7 @@ dma_map_fail:
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
-fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
-
return -ENOMEM;
}
@@ -269,10 +267,8 @@ isert_alloc_comps(struct isert_device *device)
device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
GFP_KERNEL);
- if (!device->comps) {
- isert_err("Unable to allocate completion contexts\n");
+ if (!device->comps)
return -ENOMEM;
- }
max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
@@ -432,10 +428,8 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
GFP_KERNEL);
- if (!isert_conn->login_req_buf) {
- isert_err("Unable to allocate isert_conn->login_buf\n");
+ if (!isert_conn->login_req_buf)
return -ENOMEM;
- }
isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
isert_conn->login_req_buf,
@@ -795,6 +789,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
*/
return 1;
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ isert_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
@@ -1276,11 +1272,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
if (payload_length) {
text_in = kzalloc(payload_length, GFP_KERNEL);
- if (!text_in) {
- isert_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ if (!text_in)
return -ENOMEM;
- }
}
cmd->text_in_ptr = text_in;
@@ -1851,6 +1844,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -1978,6 +1973,8 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
@@ -2018,6 +2015,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
@@ -2307,10 +2306,9 @@ isert_setup_np(struct iscsi_np *np,
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
- if (!isert_np) {
- isert_err("Unable to allocate struct isert_np\n");
+ if (!isert_np)
return -ENOMEM;
- }
+
sema_init(&isert_np->sem, 0);
mutex_init(&isert_np->mutex);
INIT_LIST_HEAD(&isert_np->accepted);
@@ -2651,7 +2649,6 @@ static int __init isert_init(void)
WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!isert_comp_wq) {
isert_err("Unable to allocate isert_comp_wq\n");
- ret = -ENOMEM;
return -ENOMEM;
}
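
The ib_isert.c hunks above also add ib_dma_mapping_error() checks after each ib_dma_map_single() call: the returned value is a DMA cookie that can be invalid, and it must be validated before being written into a scatter/gather entry. A hedged sketch of the check with placeholder names (map_pdu_buf() is not an isert function):

	/* assumes <rdma/ib_verbs.h>; buf and len come from the caller */
	static int map_pdu_buf(struct ib_device *dev, void *buf, size_t len,
			       u64 *dma_addr)
	{
		*dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(dev, *dma_addr))
			return -ENOMEM;	/* nothing was mapped, nothing to unmap */
		return 0;
	}
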
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d980fb458ad4..8ddc07123193 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -64,6 +64,11 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
+#if !defined(CONFIG_DYNAMIC_DEBUG)
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+#endif
+
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
@@ -384,6 +389,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
+ if (ret == -ENOMEM)
+ pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
+ dev_name(&device->dev));
goto destroy_pool;
}
d->mr = mr;
@@ -1266,8 +1274,12 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
- if (state->fmr.next >= state->fmr.end)
+ if (state->fmr.next >= state->fmr.end) {
+ shost_printk(KERN_ERR, ch->target->scsi_host,
+ PFX "Out of MRs (mr_per_cmd = %d)\n",
+ ch->target->mr_per_cmd);
return -ENOMEM;
+ }
WARN_ON_ONCE(!dev->use_fmr);
@@ -1323,8 +1335,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
u32 rkey;
int n, err;
- if (state->fr.next >= state->fr.end)
+ if (state->fr.next >= state->fr.end) {
+ shost_printk(KERN_ERR, ch->target->scsi_host,
+ PFX "Out of MRs (mr_per_cmd = %d)\n",
+ ch->target->mr_per_cmd);
return -ENOMEM;
+ }
WARN_ON_ONCE(!dev->use_fast_reg);
@@ -1556,7 +1572,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
return 0;
}
-#if defined(DYNAMIC_DATA_DEBUG)
static void srp_check_mapping(struct srp_map_state *state,
struct srp_rdma_ch *ch, struct srp_request *req,
struct scatterlist *scat, int count)
@@ -1580,7 +1595,6 @@ static void srp_check_mapping(struct srp_map_state *state,
scsi_bufflen(req->scmnd), desc_len, mr_len,
state->ndesc, state->nmdesc);
}
-#endif
/**
* srp_map_data() - map SCSI data buffer onto an SRP request
@@ -1669,14 +1683,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
if (ret < 0)
goto unmap;
-#if defined(DYNAMIC_DEBUG)
{
DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
"Memory mapping consistency check");
- if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
+ if (DYNAMIC_DEBUG_BRANCH(ddm))
srp_check_mapping(&state, ch, req, scat, count);
}
-#endif
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -3287,7 +3299,9 @@ static ssize_t srp_create_target(struct device *dev,
*/
scsi_host_get(target->scsi_host);
- mutex_lock(&host->add_target_mutex);
+ ret = mutex_lock_interruptible(&host->add_target_mutex);
+ if (ret < 0)
+ goto put;
ret = srp_parse_options(buf, target);
if (ret)
@@ -3443,6 +3457,7 @@ connected:
out:
mutex_unlock(&host->add_target_mutex);
+put:
scsi_host_put(target->scsi_host);
if (ret < 0)
scsi_host_put(target->scsi_host);
@@ -3526,6 +3541,7 @@ free_host:
static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
+ struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift, p;
u64 max_pages_per_mr;
@@ -3540,25 +3556,25 @@ static void srp_add_one(struct ib_device *device)
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
- mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1);
+ mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
srp_dev->mr_page_size = 1 << mr_page_shift;
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
- max_pages_per_mr = device->attrs.max_mr_size;
+ max_pages_per_mr = attr->max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
- device->attrs.max_mr_size, srp_dev->mr_page_size,
+ attr->max_mr_size, srp_dev->mr_page_size,
max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
device->map_phys_fmr && device->unmap_fmr);
- srp_dev->has_fr = (device->attrs.device_cap_flags &
+ srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
dev_warn(&device->dev, "neither FMR nor FR is supported\n");
} else if (!never_register &&
- device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
srp_dev->use_fast_reg = (srp_dev->has_fr &&
(!srp_dev->has_fmr || prefer_fr));
srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
@@ -3571,13 +3587,13 @@ static void srp_add_one(struct ib_device *device)
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
- device->attrs.max_fast_reg_page_list_len);
+ attr->max_fast_reg_page_list_len);
}
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
- device->name, mr_page_shift, device->attrs.max_mr_size,
- device->attrs.max_fast_reg_page_list_len,
+ device->name, mr_page_shift, attr->max_mr_size,
+ attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
INIT_LIST_HEAD(&srp_dev->dev_list);
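
On the ib_srp.c hunks above: the removed guards tested DYNAMIC_DATA_DEBUG and DYNAMIC_DEBUG rather than CONFIG_DYNAMIC_DEBUG, so the mapping consistency check was effectively never compiled in. The rework keys the call off DYNAMIC_DEBUG_BRANCH() and supplies no-op fallbacks when CONFIG_DYNAMIC_DEBUG is unset, so the check compiles away without any #ifdef around the call site. A sketch of the pattern, assuming the same local fallback macros; expensive_check() is a hypothetical helper:

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm, "consistency check");

		/* constant-false with CONFIG_DYNAMIC_DEBUG=n, so the compiler
		 * drops the call entirely */
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			expensive_check(&state);
	}
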
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b1f69ed2e92..d21ba9d857c3 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1840,7 +1840,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srpt_rdma_ch *ch, *tmp_ch;
u32 it_iu_len;
int i, ret = 0;
- unsigned char *p;
WARN_ON_ONCE(irqs_disabled());
@@ -1994,21 +1993,18 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
- p = &ch->sess_name[0];
-try_again:
ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
- TARGET_PROT_NORMAL, p, ch, NULL);
+ TARGET_PROT_NORMAL, ch->sess_name, ch,
+ NULL);
+ /* Retry without leading "0x" */
+ if (IS_ERR(ch->sess))
+ ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
+ TARGET_PROT_NORMAL,
+ ch->sess_name + 2, ch, NULL);
if (IS_ERR(ch->sess)) {
- pr_info("Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", p);
- /*
- * XXX: Hack to retry of ch->i_port_id without leading '0x'
- */
- if (p == &ch->sess_name[0]) {
- p += 2;
- goto try_again;
- }
+ pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
+ ch->sess_name);
rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index d84d20b9cec0..2186f71c9fe5 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -9,7 +9,7 @@
*/
#include <linux/export.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "input-compat.h"
#ifdef CONFIG_COMPAT
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f3135ae22df4..abd18f31b24f 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/init.h>
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 70a893a17467..36a5b93156ed 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -165,7 +165,7 @@ static void walkera0701_irq_handler(void *handler_data)
RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
w->counter = 0;
- hrtimer_start(&w->timer, ktime_set(0, BIN_SAMPLE), HRTIMER_MODE_REL);
+ hrtimer_start(&w->timer, BIN_SAMPLE, HRTIMER_MODE_REL);
}
static enum hrtimer_restart timer_handler(struct hrtimer
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83af17ad0f1f..c7d5b2b643d1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -134,6 +134,7 @@ static const struct xpad_device {
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -1044,9 +1045,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0xFF;
- packet->data[11] = 0x00;
- packet->data[12] = 0x00;
+ packet->data[10] = 0xFF; /* on period */
+ packet->data[11] = 0x00; /* off period */
+ packet->data[12] = 0xFF; /* repeat count */
packet->len = 13;
packet->pending = true;
break;
@@ -1376,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_dev->name = xpad->name;
input_dev->phys = xpad->phys;
usb_to_input_id(xpad->udev, &input_dev->id);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /* x360w controllers and the receiver have different ids */
+ input_dev->id.product = 0x02a1;
+ }
+
input_dev->dev.parent = &xpad->intf->dev;
input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 29093657f2ef..582462d0af75 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,15 +26,15 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
+ struct gpio_desc *gpiod;
struct timer_list release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
@@ -140,7 +140,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
*/
disable_irq(bdata->irq);
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -358,19 +358,20 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
const struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
- int state = gpio_get_value_cansleep(button->gpio);
+ int state;
+ state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(input->dev.parent, "failed to get gpio state\n");
+ dev_err(input->dev.parent,
+ "failed to get gpio state: %d\n", state);
return;
}
- state = (state ? 1 : 0) ^ button->active_low;
if (type == EV_ABS) {
if (state)
input_event(input, type, button->code, button->value);
} else {
- input_event(input, type, button->code, !!state);
+ input_event(input, type, button->code, state);
}
input_sync(input);
}
@@ -456,7 +457,7 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -465,7 +466,8 @@ static void gpio_keys_quiesce_key(void *data)
static int gpio_keys_setup_key(struct platform_device *pdev,
struct input_dev *input,
struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+ const struct gpio_keys_button *button,
+ struct fwnode_handle *child)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -478,18 +480,56 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->button = button;
spin_lock_init(&bdata->lock);
- if (gpio_is_valid(button->gpio)) {
+ if (child) {
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL, child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error == -ENOENT) {
+ /*
+ * GPIO is optional, we may be dealing with
+ * purely interrupt-driven setup.
+ */
+ bdata->gpiod = NULL;
+ } else {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "failed to get gpio: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ return error;
+ }
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number, so request the GPIO here and
+ * convert it to descriptor.
+ */
+ unsigned flags = GPIOF_IN;
+
+ if (button->active_low)
+ flags |= GPIOF_ACTIVE_LOW;
- error = devm_gpio_request_one(&pdev->dev, button->gpio,
- GPIOF_IN, desc);
+ error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
+ desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod)
+ return -EINVAL;
+ }
+
+ if (bdata->gpiod) {
if (button->debounce_interval) {
- error = gpio_set_debounce(button->gpio,
+ error = gpiod_set_debounce(bdata->gpiod,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
@@ -500,7 +540,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
if (button->irq) {
bdata->irq = button->irq;
} else {
- irq = gpio_to_irq(button->gpio);
+ irq = gpiod_to_irq(bdata->gpiod);
if (irq < 0) {
error = irq;
dev_err(dev,
@@ -518,9 +558,10 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
} else {
if (!button->irq) {
- dev_err(dev, "No IRQ specified\n");
+ dev_err(dev, "Found button without gpio or irq\n");
return -EINVAL;
}
+
bdata->irq = button->irq;
if (button->type && button->type != EV_KEY) {
@@ -575,7 +616,7 @@ static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
gpio_keys_gpio_report_event(bdata);
}
input_sync(input);
@@ -612,25 +653,18 @@ static void gpio_keys_close(struct input_dev *input)
* Handlers for alternative sources of platform_data
*/
-#ifdef CONFIG_OF
/*
- * Translate OpenFirmware node properties into platform_data
+ * Translate properties into platform_data
*/
static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
- struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- int error;
+ struct fwnode_handle *child;
int nbuttons;
- int i;
- node = dev->of_node;
- if (!node)
- return ERR_PTR(-ENODEV);
-
- nbuttons = of_get_available_child_count(node);
+ nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
return ERR_PTR(-ENODEV);
@@ -640,64 +674,47 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
- pdata->nbuttons = nbuttons;
-
- pdata->rep = !!of_get_property(node, "autorepeat", NULL);
-
- of_property_read_string(node, "label", &pdata->name);
-
- i = 0;
- for_each_available_child_of_node(node, pp) {
- enum of_gpio_flags flags;
+ button = (struct gpio_keys_button *)(pdata + 1);
- button = &pdata->buttons[i++];
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
- if (button->gpio < 0) {
- error = button->gpio;
- if (error != -ENOENT) {
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
- }
- } else {
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
- }
+ pdata->rep = device_property_read_bool(dev, "autorepeat");
- button->irq = irq_of_parse_and_map(pp, 0);
+ device_property_read_string(dev, "label", &pdata->name);
- if (!gpio_is_valid(button->gpio) && !button->irq) {
- dev_err(dev, "Found button without gpios or irqs\n");
- return ERR_PTR(-EINVAL);
- }
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child))
+ button->irq =
+ irq_of_parse_and_map(to_of_node(child), 0);
- if (of_property_read_u32(pp, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: 0x%x\n",
- button->gpio);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "Button without keycode\n");
+ fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
- button->desc = of_get_property(pp, "label", NULL);
+ fwnode_property_read_string(child, "label", &button->desc);
- if (of_property_read_u32(pp, "linux,input-type", &button->type))
+ if (fwnode_property_read_u32(child, "linux,input-type",
+ &button->type))
button->type = EV_KEY;
- button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
- /* legacy name */
- of_property_read_bool(pp, "gpio-key,wakeup");
+ button->wakeup =
+ fwnode_property_read_bool(child, "wakeup-source") ||
+ /* legacy name */
+ fwnode_property_read_bool(child, "gpio-key,wakeup");
- button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+ button->can_disable =
+ fwnode_property_read_bool(child, "linux,can-disable");
- if (of_property_read_u32(pp, "debounce-interval",
+ if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -708,20 +725,11 @@ static const struct of_device_id gpio_keys_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
-#else
-
-static inline struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
-#endif
-
static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+ struct fwnode_handle *child = NULL;
struct gpio_keys_drvdata *ddata;
struct input_dev *input;
size_t size;
@@ -774,14 +782,28 @@ static int gpio_keys_probe(struct platform_device *pdev)
const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_button_data *bdata = &ddata->data[i];
- error = gpio_keys_setup_key(pdev, input, bdata, button);
- if (error)
+ if (!dev_get_platdata(dev)) {
+ child = device_get_next_child_node(&pdev->dev, child);
+ if (!child) {
+ dev_err(&pdev->dev,
+ "missing child device node for entry %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ error = gpio_keys_setup_key(pdev, input, bdata, button, child);
+ if (error) {
+ fwnode_handle_put(child);
return error;
+ }
if (button->wakeup)
wakeup = 1;
}
+ fwnode_handle_put(child);
+
error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
if (error) {
dev_err(dev, "Unable to export keys/switches, error: %d\n",
@@ -814,8 +836,7 @@ static int gpio_keys_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gpio_keys_suspend(struct device *dev)
+static int __maybe_unused gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -837,7 +858,7 @@ static int gpio_keys_suspend(struct device *dev)
return 0;
}
-static int gpio_keys_resume(struct device *dev)
+static int __maybe_unused gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -863,7 +884,6 @@ static int gpio_keys_resume(struct device *dev)
gpio_keys_report_state(ddata);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
@@ -873,7 +893,7 @@ static struct platform_driver gpio_keys_device_driver = {
.driver = {
.name = "gpio-keys",
.pm = &gpio_keys_pm_ops,
- .of_match_table = of_match_ptr(gpio_keys_of_match),
+ .of_match_table = gpio_keys_of_match,
}
};
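
Two things are going on in the gpio_keys.c conversion above. First, buttons described by firmware nodes are now looked up as GPIO descriptors via devm_get_gpiod_from_child(); the descriptor carries the active-low polarity, so gpiod_get_value_cansleep() already returns a logical 0/1 (or a negative errno, hence the new error check) and the manual '^ button->active_low' flip goes away. Second, the DT-only parser is replaced by the unified device-property API, which serves both DT and ACPI. A minimal sketch of that property walk; my_add_button() is a hypothetical helper:

	/* assumes <linux/property.h> */
	struct fwnode_handle *child;
	u32 code;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "linux,code", &code)) {
			fwnode_handle_put(child);	/* drop the ref on early exit */
			return -EINVAL;
		}
		my_add_button(dev, child, code);
	}
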
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 62bdb1d48c49..bed4f2086158 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -30,10 +30,10 @@
#define DRV_NAME "gpio-keys-polled"
struct gpio_keys_button_data {
+ struct gpio_desc *gpiod;
int last_state;
int count;
int threshold;
- int can_sleep;
};
struct gpio_keys_polled_dev {
@@ -46,7 +46,7 @@ struct gpio_keys_polled_dev {
};
static void gpio_keys_button_event(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
int state)
{
struct gpio_keys_polled_dev *bdev = dev->private;
@@ -70,21 +70,22 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
}
static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
int state;
- if (bdata->can_sleep)
- state = !!gpiod_get_value_cansleep(button->gpiod);
- else
- state = !!gpiod_get_value(button->gpiod);
-
- gpio_keys_button_event(dev, button, state);
+ state = gpiod_get_value_cansleep(bdata->gpiod);
+ if (state < 0) {
+ dev_err(dev->input->dev.parent,
+ "failed to get gpio state: %d\n", state);
+ } else {
+ gpio_keys_button_event(dev, button, state);
- if (state != bdata->last_state) {
- bdata->count = 0;
- bdata->last_state = state;
+ if (state != bdata->last_state) {
+ bdata->count = 0;
+ bdata->last_state = state;
+ }
}
}
@@ -142,48 +143,35 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
pdata->disable(bdev->dev);
}
-static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
struct fwnode_handle *child;
- int error;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * sizeof(*button),
GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+ button = (struct gpio_keys_button *)(pdata + 1);
+
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
pdata->rep = device_property_present(dev, "autorepeat");
device_property_read_u32(dev, "poll-interval", &pdata->poll_interval);
device_for_each_child_node(dev, child) {
- struct gpio_desc *desc;
-
- desc = devm_get_gpiod_from_child(dev, NULL, child);
- if (IS_ERR(desc)) {
- error = PTR_ERR(desc);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- fwnode_handle_put(child);
- return ERR_PTR(error);
- }
-
- button = &pdata->buttons[pdata->nbuttons++];
- button->gpiod = desc;
-
- if (fwnode_property_read_u32(child, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: %d\n",
- pdata->nbuttons - 1);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "button without keycode\n");
fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
@@ -206,10 +194,9 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -220,7 +207,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
int i, min = 0, max = 0;
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
if (button->type != EV_ABS || button->code != code)
continue;
@@ -230,6 +217,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
if (button->value > max)
max = button->value;
}
+
input_set_abs_params(input, code, min, max, 0, 0);
}
@@ -242,6 +230,7 @@ MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct fwnode_handle *child = NULL;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_polled_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -254,10 +243,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
pdata = gpio_keys_polled_get_devtree_pdata(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
}
if (!pdata->poll_interval) {
@@ -300,20 +285,48 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
__set_bit(EV_REP, input->evbit);
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
unsigned int type = button->type ?: EV_KEY;
if (button->wakeup) {
dev_err(dev, DRV_NAME " does not support wakeup\n");
+ fwnode_handle_put(child);
return -EINVAL;
}
- /*
- * Legacy GPIO number so request the GPIO here and
- * convert it to descriptor.
- */
- if (!button->gpiod && gpio_is_valid(button->gpio)) {
+ if (!dev_get_platdata(dev)) {
+ /* No legacy static platform data */
+ child = device_get_next_child_node(dev, child);
+ if (!child) {
+ dev_err(dev, "missing child device node\n");
+ return -EINVAL;
+ }
+
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL,
+ child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get gpio: %d\n",
+ error);
+ fwnode_handle_put(child);
+ return error;
+ }
+
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ fwnode_handle_put(child);
+ return error;
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number so request the GPIO here and
+ * convert it to descriptor.
+ */
unsigned flags = GPIOF_IN;
if (button->active_low)
@@ -322,18 +335,21 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(&pdev->dev, button->gpio,
flags, button->desc ? : DRV_NAME);
if (error) {
- dev_err(dev, "unable to claim gpio %u, err=%d\n",
+ dev_err(dev,
+ "unable to claim gpio %u, err=%d\n",
button->gpio, error);
return error;
}
- button->gpiod = gpio_to_desc(button->gpio);
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod) {
+ dev_err(dev,
+ "unable to convert gpio %u to descriptor\n",
+ button->gpio);
+ return -EINVAL;
+ }
}
- if (IS_ERR(button->gpiod))
- return PTR_ERR(button->gpiod);
-
- bdata->can_sleep = gpiod_cansleep(button->gpiod);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
pdata->poll_interval);
@@ -344,6 +360,8 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
button->code);
}
+ fwnode_handle_put(child);
+
bdev->poll_dev = poll_dev;
bdev->dev = dev;
bdev->pdata = pdata;
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 265d641c40e2..632523d4f5dc 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -182,7 +182,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= NR_IRQS) {
+ if (irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
return -EINVAL;
}
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index fcef5d1365e2..e24443376e75 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -316,7 +316,7 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
error = of_property_read_u32(np, "marvell,debounce-interval",
&pdata->debounce_interval);
if (error) {
- dev_err(dev, "failed to parse debpunce-interval\n");
+ dev_err(dev, "failed to parse debounce-interval\n");
return error;
}
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298698fc..3048ef3e3e16 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
int error, col, row;
u8 reg, state, code;
- /* Initial read of the key event FIFO */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ do {
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+ break;
+ }
+
+ /* Assume that key code 0 signifies empty FIFO */
+ if (reg <= 0)
+ break;
- /* Assume that key code 0 signifies empty FIFO */
- while (error >= 0 && reg > 0) {
state = reg & KEY_EVENT_VALUE;
code = reg & KEY_EVENT_CODE;
@@ -184,11 +191,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
- }
-
- if (error < 0)
- dev_err(&keypad_data->client->dev,
- "unable to read REG_KEY_EVENT_A\n");
+ } while (1);
input_sync(input);
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7ffb614ce566..1ae4d9617ff8 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -625,11 +625,12 @@ config INPUT_DA9055_ONKEY
will be called da9055_onkey.
config INPUT_DA9063_ONKEY
- tristate "Dialog DA9062/63 OnKey"
+ tristate "Dialog DA9063/62/61 OnKey"
depends on MFD_DA9063 || MFD_DA9062
help
- Support the ONKEY of Dialog DA9063 and DA9062 Power Management ICs
- as an input device capable of reporting the power button status.
+ Support the ONKEY of Dialog DA9063, DA9062 and DA9061 Power
+ Management ICs as an input device capable of reporting the
+ power button status.
To compile this driver as a module, choose M here: the module
will be called da9063_onkey.
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a8b0a2eec344..7fed92fb8cc1 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
MODULE_DEVICE_TABLE(i2c, adxl34x_id);
-#ifdef CONFIG_OF
static const struct of_device_id adxl34x_of_id[] = {
/*
* The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
};
MODULE_DEVICE_TABLE(of, adxl34x_of_id);
-#endif
static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.pm = &adxl34x_i2c_pm,
- .of_match_table = of_match_ptr(adxl34x_of_id),
+ .of_match_table = adxl34x_of_id,
},
.probe = adxl34x_i2c_probe,
.remove = adxl34x_i2c_remove,
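
adxl34x-i2c.c above drops the #ifdef CONFIG_OF around its of_device_id table and stops wrapping it in of_match_ptr(). Referencing the table directly (instead of of_match_ptr(), which evaluates to NULL without CONFIG_OF) means it is always used, so no #ifdef is needed to silence "defined but not used" warnings, and the same compatible strings can also match via ACPI's PRP0001 mechanism. Sketch of the resulting skeleton (names abbreviated, only one compatible shown):

	static const struct of_device_id my_of_ids[] = {
		{ .compatible = "adi,adxl345" },
		{ }
	};
	MODULE_DEVICE_TABLE(of, my_of_ids);

	static struct i2c_driver my_driver = {
		.driver = {
			.name		= "adxl34x",
			.of_match_table	= my_of_ids,	/* no of_match_ptr() */
		},
		/* .probe and .remove unchanged */
	};
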
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 982936334537..07ec465f1095 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_enable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ ret = snd_soc_component_disable_pin(component, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct snd_soc_component *component;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm)
- snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+ if (haptics->arizona->dapm) {
+ component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+ snd_soc_component_disable_pin(component, "HAPTICS");
+ }
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 638165c78e75..6423aaccc763 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -28,7 +28,7 @@
#include <linux/input.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define ACPI_ATLAS_NAME "Atlas ACPI"
#define ACPI_ATLAS_CLASS "Atlas"
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b0d445390ee4..2124390ec38c 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -538,8 +538,13 @@ static int bma150_probe(struct i2c_client *client,
return -EIO;
}
+ /*
+	 * Note: if the IIO CONFIG_BMA180 driver is enabled, we want to fail
+	 * the probe for the bma180, as the IIO driver is preferred.
+ */
chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
- if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
+ if (chip_id != BMA150_CHIP_ID &&
+ (IS_ENABLED(CONFIG_BMA180) || chip_id != BMA180_CHIP_ID)) {
dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
return -EINVAL;
}
@@ -643,7 +648,9 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
static const struct i2c_device_id bma150_id[] = {
{ "bma150", 0 },
+#if !IS_ENABLED(CONFIG_BMA180)
{ "bma180", 0 },
+#endif
{ "smb380", 0 },
{ "bma023", 0 },
{ }
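
bma150.c above uses IS_ENABLED(CONFIG_BMA180) to yield BMA180 parts to the IIO bma180 driver at compile time: when that driver is enabled (built-in or module), the chip-id check rejects BMA180 chips and the "bma180" i2c id is compiled out, so only one driver can bind. The useful idiom is that IS_ENABLED() is an ordinary C expression, so both sides of the condition stay compile-checked; a sketch mirroring the check above:

	/* reject the id we want the IIO driver to handle when it is available */
	if (chip_id != BMA150_CHIP_ID &&
	    (IS_ENABLED(CONFIG_BMA180) || chip_id != BMA180_CHIP_ID))
		return -EINVAL;
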
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index bb863e062b03..b4ff1e86d3d3 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -1,5 +1,5 @@
/*
- * OnKey device driver for DA9063 and DA9062 PMICs
+ * OnKey device driver for DA9063, DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,7 @@ static const struct of_device_id da9063_compatible_reg_id_table[] = {
{ .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
{ },
};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
static void da9063_poll_on(struct work_struct *work)
{
@@ -149,13 +150,13 @@ static void da9063_poll_on(struct work_struct *work)
* and then send shutdown command
*/
dev_dbg(&onkey->input->dev,
- "Sending SHUTDOWN to DA9063 ...\n");
+ "Sending SHUTDOWN to PMIC ...\n");
error = regmap_write(onkey->regmap,
config->onkey_shutdown,
config->onkey_shutdown_mask);
if (error)
dev_err(&onkey->input->dev,
- "Cannot SHUTDOWN DA9063: %d\n",
+ "Cannot SHUTDOWN PMIC: %d\n",
error);
}
}
@@ -300,6 +301,6 @@ static struct platform_driver da9063_onkey_driver = {
module_platform_driver(da9063_onkey_driver);
MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063 and DA9062");
+MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063, DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_ONKEY);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 2adfd86c869a..0a2b865b1000 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -18,8 +18,6 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -27,7 +25,6 @@
#include <linux/regulator/consumer.h>
#include <dt-bindings/input/ti-drv260x.h>
-#include <linux/platform_data/drv260x-pdata.h>
#define DRV260X_STATUS 0x0
#define DRV260X_MODE 0x1
@@ -468,90 +465,39 @@ static const struct regmap_config drv260x_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-#ifdef CONFIG_OF
-static int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- struct device_node *np = dev->of_node;
- unsigned int voltage;
- int error;
-
- error = of_property_read_u32(np, "mode", &haptics->mode);
- if (error) {
- dev_err(dev, "%s: No entry for mode\n", __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "library-sel", &haptics->library);
- if (error) {
- dev_err(dev, "%s: No entry for library selection\n",
- __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "vib-rated-mv", &voltage);
- if (!error)
- haptics->rated_voltage = drv260x_calculate_voltage(voltage);
-
-
- error = of_property_read_u32(np, "vib-overdrive-mv", &voltage);
- if (!error)
- haptics->overdrive_voltage = drv260x_calculate_voltage(voltage);
-
- return 0;
-}
-#else
-static inline int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- dev_err(dev, "no platform data defined\n");
-
- return -EINVAL;
-}
-#endif
-
static int drv260x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
struct drv260x_data *haptics;
+ u32 voltage;
int error;
- haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+ haptics = devm_kzalloc(dev, sizeof(*haptics), GFP_KERNEL);
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
- haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
-
- if (pdata) {
- haptics->mode = pdata->mode;
- haptics->library = pdata->library_selection;
- if (pdata->vib_overdrive_voltage)
- haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage);
- if (pdata->vib_rated_voltage)
- haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage);
- } else if (client->dev.of_node) {
- error = drv260x_parse_dt(&client->dev, haptics);
- if (error)
- return error;
- } else {
- dev_err(&client->dev, "Platform data not set\n");
- return -ENODEV;
+ error = device_property_read_u32(dev, "mode", &haptics->mode);
+ if (error) {
+ dev_err(dev, "Can't fetch 'mode' property: %d\n", error);
+ return error;
}
-
if (haptics->mode < DRV260X_LRA_MODE ||
haptics->mode > DRV260X_ERM_MODE) {
- dev_err(&client->dev,
- "Vibrator mode is invalid: %i\n",
- haptics->mode);
+ dev_err(dev, "Vibrator mode is invalid: %i\n", haptics->mode);
return -EINVAL;
}
+ error = device_property_read_u32(dev, "library-sel", &haptics->library);
+ if (error) {
+ dev_err(dev, "Can't fetch 'library-sel' property: %d\n", error);
+ return error;
+ }
+
if (haptics->library < DRV260X_LIB_EMPTY ||
haptics->library > DRV260X_ERM_LIB_F) {
- dev_err(&client->dev,
+ dev_err(dev,
"Library value is invalid: %i\n", haptics->library);
return -EINVAL;
}
@@ -559,40 +505,44 @@ static int drv260x_probe(struct i2c_client *client,
if (haptics->mode == DRV260X_LRA_MODE &&
haptics->library != DRV260X_LIB_EMPTY &&
haptics->library != DRV260X_LIB_LRA) {
- dev_err(&client->dev,
- "LRA Mode with ERM Library mismatch\n");
+ dev_err(dev, "LRA Mode with ERM Library mismatch\n");
return -EINVAL;
}
if (haptics->mode == DRV260X_ERM_MODE &&
(haptics->library == DRV260X_LIB_EMPTY ||
haptics->library == DRV260X_LIB_LRA)) {
- dev_err(&client->dev,
- "ERM Mode with LRA Library mismatch\n");
+ dev_err(dev, "ERM Mode with LRA Library mismatch\n");
return -EINVAL;
}
- haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+ error = device_property_read_u32(dev, "vib-rated-mv", &voltage);
+ haptics->rated_voltage = error ? DRV260X_DEF_RATED_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ error = device_property_read_u32(dev, "vib-overdrive-mv", &voltage);
+ haptics->overdrive_voltage = error ? DRV260X_DEF_OD_CLAMP_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ haptics->regulator = devm_regulator_get(dev, "vbat");
if (IS_ERR(haptics->regulator)) {
error = PTR_ERR(haptics->regulator);
- dev_err(&client->dev,
- "unable to get regulator, error: %d\n", error);
+ dev_err(dev, "unable to get regulator, error: %d\n", error);
return error;
}
- haptics->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ haptics->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(haptics->enable_gpio))
return PTR_ERR(haptics->enable_gpio);
- haptics->input_dev = devm_input_allocate_device(&client->dev);
+ haptics->input_dev = devm_input_allocate_device(dev);
if (!haptics->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
return -ENOMEM;
}
haptics->input_dev->name = "drv260x:haptics";
- haptics->input_dev->dev.parent = client->dev.parent;
haptics->input_dev->close = drv260x_close;
input_set_drvdata(haptics->input_dev, haptics);
input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
@@ -600,8 +550,7 @@ static int drv260x_probe(struct i2c_client *client,
error = input_ff_create_memless(haptics->input_dev, NULL,
drv260x_haptics_play);
if (error) {
- dev_err(&client->dev, "input_ff_create() failed: %d\n",
- error);
+ dev_err(dev, "input_ff_create() failed: %d\n", error);
return error;
}
@@ -613,21 +562,19 @@ static int drv260x_probe(struct i2c_client *client,
haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
if (IS_ERR(haptics->regmap)) {
error = PTR_ERR(haptics->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- error);
+ dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
error = drv260x_init(haptics);
if (error) {
- dev_err(&client->dev, "Device init failed: %d\n", error);
+ dev_err(dev, "Device init failed: %d\n", error);
return error;
}
error = input_register_device(haptics->input_dev);
if (error) {
- dev_err(&client->dev, "couldn't register input device: %d\n",
- error);
+ dev_err(dev, "couldn't register input device: %d\n", error);
return error;
}
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index ef9bc12b3be3..dcb6d8e94b11 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -125,8 +125,8 @@ static void drv2665_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
- error = regmap_update_bits(haptics->regmap,
- DRV2665_CTRL_2, DRV2665_STANDBY, 1);
+ error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -240,7 +240,7 @@ static int __maybe_unused drv2665_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
- DRV2665_STANDBY, 1);
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index d5ba7481328c..2849bb6906a8 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -256,7 +256,7 @@ static void drv2667_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -415,7 +415,7 @@ static int __maybe_unused drv2667_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
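
The drv2665/drv2667 hunks fix a common regmap_update_bits() mistake: the last argument is the new value for the masked bits, not a boolean. The STANDBY flags in these parts are single bits above bit 0, so passing 1 as the value leaves the masked bit at 0 and the chip never actually entered standby; passing the mask itself sets it. Sketch with placeholder register and bit names:

	/* wrong: the mask selects a high bit, but the value only has bit 0 set,
	 * so (value & mask) == 0 and the bit is cleared */
	regmap_update_bits(map, REG_CTRL2, STANDBY_BIT, 1);

	/* right: the value carries the bit inside the mask */
	regmap_update_bits(map, REG_CTRL2, STANDBY_BIT, STANDBY_BIT);
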
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index c14b82709b0f..908b51089dee 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
/*
@@ -92,7 +93,7 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0)
+ if (!gpio_is_valid(gpio))
continue;
gpio_keys[n_buttons].type = info->event_type;
@@ -166,6 +167,11 @@ static int soc_button_probe(struct platform_device *pdev)
button_info = (struct soc_button_info *)id->driver_data;
+ if (gpiod_count(&pdev->dev, KBUILD_MODNAME) <= 0) {
+ dev_dbg(&pdev->dev, "no GPIO attached, ignoring...\n");
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index 3273217ce80c..cc74a41bdb0d 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -150,12 +150,20 @@ static int tps6521x_pb_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id tps6521x_pwrbtn_id_table[] = {
+ { "tps65218-pwrbutton", },
+ { "tps65217-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_pwrbtn_id_table);
+
static struct platform_driver tps6521x_pb_driver = {
.probe = tps6521x_pb_probe,
.driver = {
.name = "tps6521x_pwrbutton",
.of_match_table = of_tps6521x_pb_match,
},
+ .id_table = tps6521x_pwrbtn_id_table,
};
module_platform_driver(tps6521x_pb_driver);
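
The tps65218-pwrbutton change adds a platform_device_id table so the driver also binds to MFD-created child devices named "tps65217-pwrbutton" and "tps65218-pwrbutton", and MODULE_DEVICE_TABLE(platform, ...) emits the modalias entries needed for module autoloading when the parent is not described in DT. The general idiom, with placeholder names:

	static const struct platform_device_id my_id_table[] = {
		{ "my-dev-a", },
		{ "my-dev-b", },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(platform, my_id_table);

	static struct platform_driver my_driver = {
		.probe	  = my_probe,
		.driver	  = { .name = "my-driver" },
		.id_table = my_id_table,	/* matches by device name as well */
	};
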
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 6d7de9bfed9a..328edc8c8786 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1153,15 +1153,13 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
alps_process_touchpad_packet_v7(psmouse);
}
-static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
+static enum SS4_PACKET_ID alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
- unsigned char pkt_id = SS4_PACKET_ID_IDLE;
+ enum SS4_PACKET_ID pkt_id = SS4_PACKET_ID_IDLE;
switch (byte[3] & 0x30) {
case 0x00:
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
- byte[5] == 0x00) {
+ if (SS4_IS_IDLE_V2(byte)) {
pkt_id = SS4_PACKET_ID_IDLE;
} else {
pkt_id = SS4_PACKET_ID_ONE;
@@ -1188,7 +1186,7 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
unsigned char *p, struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char pkt_id;
+ enum SS4_PACKET_ID pkt_id;
unsigned int no_data_x, no_data_y;
pkt_id = alps_get_pkt_id_ss4_v2(p);
@@ -1267,18 +1265,12 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
break;
case SS4_PACKET_ID_STICK:
- if (!(priv->flags & ALPS_DUALPOINT)) {
- psmouse_warn(psmouse,
- "Rejected trackstick packet from non DualPoint device");
- } else {
- int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
- int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
- int pressure = (s8)(p[4] & 0x7f);
-
- input_report_rel(priv->dev2, REL_X, x);
- input_report_rel(priv->dev2, REL_Y, -y);
- input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
- }
+ /*
+ * x, y, and pressure are decoded in
+ * alps_process_packet_ss4_v2()
+ */
+ f->first_mp = 0;
+ f->is_mp = 0;
break;
case SS4_PACKET_ID_IDLE:
@@ -1346,6 +1338,27 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
priv->multi_packet = 0;
+ /* Report trackstick */
+ if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ return;
+ }
+
+ input_report_rel(dev2, REL_X, SS4_TS_X_V2(packet));
+ input_report_rel(dev2, REL_Y, SS4_TS_Y_V2(packet));
+ input_report_abs(dev2, ABS_PRESSURE, SS4_TS_Z_V2(packet));
+
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+
+ input_sync(dev2);
+ return;
+ }
+
+ /* Report touchpad */
alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
input_mt_report_finger_count(dev, f->fingers);
@@ -1356,13 +1369,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
-
- if (priv->flags & ALPS_DUALPOINT) {
- input_report_key(dev2, BTN_LEFT, f->ts_left);
- input_report_key(dev2, BTN_RIGHT, f->ts_right);
- input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
- input_sync(dev2);
- }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2d7ad3..6d279aa27cb9 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,7 +54,15 @@ enum SS4_PACKET_ID {
#define SS4_MASK_NORMAL_BUTTONS 0x07
-#define SS4_1F_X_V2(_b) ((_b[0] & 0x0007) | \
+#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
+ ((_b[1]) == 0x10) && \
+ ((_b[2]) == 0x00) && \
+ ((_b[3] & 0x88) == 0x08) && \
+ ((_b[4]) == 0x10) && \
+ ((_b[5]) == 0x00) \
+ )
+
+#define SS4_1F_X_V2(_b) (((_b[0]) & 0x0007) | \
((_b[1] << 3) & 0x0078) | \
((_b[1] << 2) & 0x0380) | \
((_b[2] << 5) & 0x1C00) \
@@ -101,6 +109,18 @@ enum SS4_PACKET_ID {
#define SS4_IS_MF_CONTINUE(_b) ((_b[2] & 0x10) == 0x10)
#define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
+#define SS4_TS_X_V2(_b) (s8)( \
+ ((_b[0] & 0x01) << 7) | \
+ (_b[1] & 0x7F) \
+ )
+
+#define SS4_TS_Y_V2(_b) -(s8)( \
+ ((_b[3] & 0x01) << 7) | \
+ (_b[2] & 0x7F) \
+ )
+
+#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
+
#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
@@ -146,7 +166,7 @@ struct alps_protocol_info {
* (aka command mode response) identifies the firmware minor version. This
* can be used to distinguish different hardware models which are not
* uniquely identifiable through their E7 responses.
- * @protocol_info: information about protcol used by the device.
+ * @protocol_info: information about protocol used by the device.
*
* Many (but not all) ALPS touchpads can be identified by looking at the
* values returned in the "E7 report" and/or the "EC report." This table
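
The alps.c/alps.h changes move SS4 v2 trackstick reporting out of alps_decode_ss4_v2() into alps_process_packet_ss4_v2(), with the new SS4_TS_{X,Y,Z}_V2() macros reassembling signed 8-bit deltas that are split across packet bytes (the (s8) cast supplies the sign extension; Y is negated to match input conventions). A worked example of how such a macro recovers a negative value, using a hypothetical buffer b:

	u8 b[6] = { 0x01, 0x7f, 0, 0, 0, 0 };
	/* bit 7 of X lives in b[0] bit 0, bits 6..0 in b[1] */
	int x = (s8)(((b[0] & 0x01) << 7) | (b[1] & 0x7f));	/* 0xff -> -1 */
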
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a7fd8f22ba56..a33437c480e3 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -25,7 +25,7 @@
#include <asm/irq.h>
#include <asm/setup.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c
index d1c43236b125..96f2f51604bd 100644
--- a/drivers/input/mouse/atarimouse.c
+++ b/drivers/input/mouse/atarimouse.c
@@ -47,7 +47,7 @@
#include <asm/irq.h>
#include <asm/setup.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/atarihw.h>
#include <asm/atarikb.h>
#include <asm/atariints.h>
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d15b33813021..fa598f7f4372 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1093,19 +1093,18 @@ static int elan_probe(struct i2c_client *client,
if (error)
return error;
+ dev_info(&client->dev,
+ "Elan Touchpad: Module ID: 0x%04x, Firmware: 0x%04x, Sample: 0x%04x, IAP: 0x%04x\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version);
+
dev_dbg(&client->dev,
- "Elan Touchpad Information:\n"
- " Module product ID: 0x%04x\n"
- " Firmware Version: 0x%04x\n"
- " Sample Version: 0x%04x\n"
- " IAP Version: 0x%04x\n"
+ "Elan Touchpad Extra Information:\n"
" Max ABS X,Y: %d,%d\n"
" Width X,Y: %d,%d\n"
" Resolution X,Y: %d,%d (dots/mm)\n",
- data->product_id,
- data->fw_version,
- data->sm_version,
- data->iap_version,
data->max_x, data->max_y,
data->width_x, data->width_y,
data->x_res, data->y_res);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index aa7c5da60800..cb2bf203f4ca 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -29,7 +29,7 @@
* after soft reset, we should wait for 1 ms
* before the device becomes operational
*/
-#define SOFT_RESET_DELAY_MS 3
+#define SOFT_RESET_DELAY_US 3000
/* and after hard reset, we should wait for max 500ms */
#define HARD_RESET_DELAY_MS 500
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "Unable to reset device\n");
} else {
- msleep(SOFT_RESET_DELAY_MS);
+ usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
ret = synaptics_i2c_config(client);
if (ret)
dev_err(&client->dev, "Unable to config device\n");
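
On the synaptics_i2c.c hunk: msleep() rounds up to jiffies, so a 3 ms request can easily become 10-20 ms on HZ=100; for delays in roughly the 10 us to 20 ms range the documented preference is usleep_range(), which uses hrtimers and gives the scheduler a window in which to coalesce wakeups. Sketch of the replacement:

	#define SOFT_RESET_DELAY_US	3000

	/* allow a 100 us slack window so wakeups can be batched */
	usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
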
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 354d47ecd66a..7331084973e1 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -15,7 +15,7 @@
#include <linux/input.h>
#include <linux/libps2.h>
#include <linux/proc_fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "psmouse.h"
#include "trackpoint.h"
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 4c8a55857e00..8993983e3fe4 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -27,6 +27,28 @@ config RMI4_SPI
If unsure, say N.
+config RMI4_SMB
+ tristate "RMI4 SMB Support"
+ depends on RMI4_CORE && I2C
+ help
+ Say Y here if you want to support RMI4 devices connected to an SMB
+ bus.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called rmi_smbus.
+
+config RMI4_F03
+ bool "RMI4 Function 03 (PS2 Guest)"
+ depends on RMI4_CORE
+ depends on SERIO=y || RMI4_CORE=SERIO
+ help
+ Say Y here if you want to add support for RMI4 function 03.
+
+ Function 03 provides PS2 guest support for RMI4 devices. This
+ includes support for TrackPoints on TouchPads.
+
config RMI4_2D_SENSOR
bool
depends on RMI4_CORE
@@ -62,13 +84,34 @@ config RMI4_F30
Function 30 provides GPIO and LED support for RMI4 devices. This
includes support for buttons on TouchPads and ClickPads.
+config RMI4_F34
+ bool "RMI4 Function 34 (Device reflash)"
+ depends on RMI4_CORE
+ select FW_LOADER
+ help
+ Say Y here if you want to add support for RMI4 function 34.
+
+ Function 34 provides support for upgrading the firmware on the RMI4
+ device via the firmware loader interface. This is triggered using a
+ sysfs attribute.
+
config RMI4_F54
bool "RMI4 Function 54 (Analog diagnostics)"
depends on RMI4_CORE
depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
select VIDEOBUF2_VMALLOC
+ select RMI4_F55
help
Say Y here if you want to add support for RMI4 function 54
Function 54 provides access to various diagnostic features in certain
RMI4 touch sensors.
+
+config RMI4_F55
+ bool "RMI4 Function 55 (Sensor tuning)"
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 55.
+
+ Function 55 provides access to the RMI4 touch sensor tuning
+ mechanism.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 0bafc8502c4b..9aaac3dd8613 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -4,11 +4,15 @@ rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
# Function drivers
+rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
+rmi_core-$(CONFIG_RMI4_F55) += rmi_f55.o
# Transports
obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
+obj-$(CONFIG_RMI4_SMB) += rmi_smbus.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index e97bd7fabccc..07007ff8e29f 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -177,10 +177,12 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
sensor->dmax = DMAX * res_x;
}
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
if (sensor->sensor_type == rmi_sensor_touchpad)
input_flags = INPUT_MT_POINTER;
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 77fcdfef003c..c871bef4dac0 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -67,6 +67,8 @@ struct rmi_2d_sensor {
u8 report_rel;
u8 x_mm;
u8 y_mm;
+ enum rmi_reg_state dribble;
+ enum rmi_reg_state palm_detect;
};
int rmi_2d_sensor_of_probe(struct device *dev,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index ef8c747c35e7..1c40d94ca506 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -230,6 +230,9 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
+ rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
+ fn->fd.function_number);
+
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
put_device(&fn->dev);
@@ -302,6 +305,9 @@ struct bus_type rmi_bus_type = {
static struct rmi_function_handler *fn_handlers[] = {
&rmi_f01_handler,
+#ifdef CONFIG_RMI4_F03
+ &rmi_f03_handler,
+#endif
#ifdef CONFIG_RMI4_F11
&rmi_f11_handler,
#endif
@@ -311,9 +317,15 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
+#ifdef CONFIG_RMI4_F34
+ &rmi_f34_handler,
+#endif
#ifdef CONFIG_RMI4_F54
&rmi_f54_handler,
#endif
+#ifdef CONFIG_RMI4_F55
+ &rmi_f55_handler,
+#endif
};
static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index 899579830536..b7625a9ac66a 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -105,6 +105,18 @@ rmi_get_platform_data(struct rmi_device *d)
bool rmi_is_physical_device(struct device *dev);
/**
+ * rmi_reset - reset an RMI4 device
+ * @d: Pointer to an RMI device
+ *
+ * Calls for a reset of each function implemented by a specific device.
+ * Returns 0 on success or a negative error code.
+ */
+static inline int rmi_reset(struct rmi_device *d)
+{
+ return d->driver->reset_handler(d);
+}
+
+/**
* rmi_read - read a single byte
* @d: Pointer to an RMI device
* @addr: The address to read from
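The new rmi_reset() inline added above simply forwards to the driver's reset_handler(). A hypothetical caller (example_recover() is made up for illustration, not part of the patch) would use it like this:

/* Hypothetical sketch: a function driver asking the core to reset the
 * whole RMI4 device after detecting a lost configuration.
 */
static int example_recover(struct rmi_function *fn)
{
	int error = rmi_reset(fn->rmi_dev);

	if (error)
		dev_err(&fn->dev, "device reset failed: %d\n", error);

	return error;
}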
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312fbd25..11447ab1055c 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -33,12 +34,22 @@
#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100
-static void rmi_free_function_list(struct rmi_device *rmi_dev)
+void rmi_free_function_list(struct rmi_device *rmi_dev)
{
struct rmi_function *fn, *tmp;
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
+
+ devm_kfree(&rmi_dev->dev, data->irq_memory);
+ data->irq_memory = NULL;
+ data->irq_status = NULL;
+ data->fn_irq_bits = NULL;
+ data->current_irq_mask = NULL;
+ data->new_irq_mask = NULL;
+
data->f01_container = NULL;
+ data->f34_container = NULL;
/* Doing it in the reverse order so F01 will be removed last */
list_for_each_entry_safe_reverse(fn, tmp,
@@ -133,7 +144,7 @@ static void process_one_interrupt(struct rmi_driver_data *data,
}
}
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
struct device *dev = &rmi_dev->dev;
@@ -143,7 +154,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
if (!data)
return 0;
- if (!rmi_dev->xport->attn_data) {
+ if (!data->attn_data.data) {
error = rmi_read_block(rmi_dev,
data->f01_container->fd.data_base_addr + 1,
data->irq_status, data->num_of_irq_regs);
@@ -178,7 +189,81 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+ void *data, size_t size)
+{
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data;
+ void *fifo_data;
+
+ if (!drvdata->enabled)
+ return;
+
+ fifo_data = kmemdup(data, size, GFP_ATOMIC);
+ if (!fifo_data)
+ return;
+
+ attn_data.irq_status = irq_status;
+ attn_data.size = size;
+ attn_data.data = fifo_data;
+
+ kfifo_put(&drvdata->attn_fifo, attn_data);
+}
+EXPORT_SYMBOL_GPL(rmi_set_attn_data);
+
+static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+{
+ struct rmi_device *rmi_dev = dev_id;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int ret, count;
+
+ count = kfifo_get(&drvdata->attn_fifo, &attn_data);
+ if (count) {
+ *(drvdata->irq_status) = attn_data.irq_status;
+ drvdata->attn_data = attn_data;
+ }
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ if (count)
+ kfree(attn_data.data);
+
+ if (!kfifo_is_empty(&drvdata->attn_fifo))
+ return rmi_irq_fn(irq, dev_id);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_irq_init(struct rmi_device *rmi_dev)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq_flags = irq_get_trigger_type(pdata->irq);
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
+ rmi_irq_fn, irq_flags | IRQF_ONESHOT,
+ dev_name(rmi_dev->xport->dev),
+ rmi_dev);
+ if (ret < 0) {
+ dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
+ pdata->irq);
+
+ return ret;
+ }
+
+ data->enabled = true;
+
+ return 0;
+}
static int suspend_one_function(struct rmi_function *fn)
{
@@ -248,7 +333,7 @@ static int rmi_resume_functions(struct rmi_device *rmi_dev)
return 0;
}
-static int enable_sensor(struct rmi_device *rmi_dev)
+int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
int retval = 0;
@@ -379,8 +464,8 @@ static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
return 0;
}
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address)
+static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
+ struct pdt_entry *entry, u16 pdt_address)
{
u8 buf[RMI_PDT_ENTRY_SIZE];
int error;
@@ -403,7 +488,6 @@ int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
struct rmi_function_descriptor *fd)
@@ -422,6 +506,7 @@ static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
int page,
+ int *empty_pages,
void *ctx,
int (*callback)(struct rmi_device *rmi_dev,
void *ctx,
@@ -449,20 +534,30 @@ static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
return retval;
}
- return (data->f01_bootloader_mode || addr == pdt_start) ?
+ /*
+ * Count number of empty PDT pages. If a gap of two pages
+ * or more is found, stop scanning.
+ */
+ if (addr == pdt_start)
+ ++*empty_pages;
+ else
+ *empty_pages = 0;
+
+ return (data->bootloader_mode || *empty_pages >= 2) ?
RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
-static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
- int (*callback)(struct rmi_device *rmi_dev,
- void *ctx,
- const struct pdt_entry *entry))
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *entry))
{
int page;
+ int empty_pages = 0;
int retval = RMI_SCAN_DONE;
for (page = 0; page <= RMI4_MAX_PAGE; page++) {
- retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+ retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
+ ctx, callback);
if (retval != RMI_SCAN_CONTINUE)
break;
}
@@ -600,7 +695,6 @@ free_struct_buff:
kfree(struct_buf);
return ret;
}
-EXPORT_SYMBOL_GPL(rmi_read_register_desc);
const struct rmi_register_desc_item *rmi_get_register_desc_item(
struct rmi_register_descriptor *rdesc, u16 reg)
@@ -616,7 +710,6 @@ const struct rmi_register_desc_item *rmi_get_register_desc_item(
return NULL;
}
-EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
@@ -630,7 +723,6 @@ size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
}
return size;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
@@ -648,7 +740,6 @@ int rmi_register_desc_calc_reg_offset(
}
return -1;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
u8 subpacket)
@@ -657,51 +748,55 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
subpacket) == subpacket;
}
-/* Indicates that flash programming is enabled (bootloader mode). */
-#define RMI_F01_STATUS_BOOTLOADER(status) (!!((status) & 0x40))
-
-/*
- * Given the PDT entry for F01, read the device status register to determine
- * if we're stuck in bootloader mode or not.
- *
- */
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
const struct pdt_entry *pdt)
{
- int error;
- u8 device_status;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int ret;
+ u8 status;
- error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
- &device_status);
- if (error) {
- dev_err(&rmi_dev->dev,
- "Failed to read device status: %d.\n", error);
- return error;
+ if (pdt->function_number == 0x34 && pdt->function_version > 1) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F34 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(7))
+ data->bootloader_mode = true;
+ } else if (pdt->function_number == 0x01) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F01 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(6))
+ data->bootloader_mode = true;
}
- return RMI_F01_STATUS_BOOTLOADER(device_status);
+ return 0;
}
static int rmi_count_irqs(struct rmi_device *rmi_dev,
void *ctx, const struct pdt_entry *pdt)
{
- struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
int *irq_count = ctx;
+ int ret;
*irq_count += pdt->interrupt_source_count;
- if (pdt->function_number == 0x01) {
- data->f01_bootloader_mode =
- rmi_check_bootloader_mode(rmi_dev, pdt);
- if (data->f01_bootloader_mode)
- dev_warn(&rmi_dev->dev,
- "WARNING: RMI4 device is in bootloader mode!\n");
- }
+
+ ret = rmi_check_bootloader_mode(rmi_dev, pdt);
+ if (ret < 0)
+ return ret;
return RMI_SCAN_CONTINUE;
}
-static int rmi_initial_reset(struct rmi_device *rmi_dev,
- void *ctx, const struct pdt_entry *pdt)
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt)
{
int error;
@@ -720,6 +815,7 @@ static int rmi_initial_reset(struct rmi_device *rmi_dev,
return RMI_SCAN_DONE;
}
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
if (error) {
dev_err(&rmi_dev->dev,
@@ -776,6 +872,8 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
if (pdt->function_number == 0x01)
data->f01_container = fn;
+ else if (pdt->function_number == 0x34)
+ data->f34_container = fn;
list_add_tail(&fn->node, &data->function_list);
@@ -786,23 +884,95 @@ err_put_fn:
return error;
}
-int rmi_driver_suspend(struct rmi_device *rmi_dev)
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
- int retval = 0;
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq = pdata->irq;
+ int irq_flags;
+ int retval;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (data->enabled)
+ goto out;
+
+ enable_irq(irq);
+ data->enabled = true;
+ if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = disable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to disable irq for wake: %d\n",
+ retval);
+ }
+
+ /*
+ * Call rmi_process_interrupt_requests() after enabling irq,
+ * otherwise we may lose interrupt on edge-triggered systems.
+ */
+ irq_flags = irq_get_trigger_type(pdata->irq);
+ if (irq_flags & IRQ_TYPE_EDGE_BOTH)
+ rmi_process_interrupt_requests(rmi_dev);
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int irq = pdata->irq;
+ int retval, count;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (!data->enabled)
+ goto out;
+
+ data->enabled = false;
+ disable_irq(irq);
+ if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = enable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to enable irq for wake: %d\n",
+ retval);
+ }
+
+ /* make sure the fifo is clean */
+ while (!kfifo_is_empty(&data->attn_fifo)) {
+ count = kfifo_get(&data->attn_fifo, &attn_data);
+ if (count)
+ kfree(attn_data.data);
+ }
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ int retval;
retval = rmi_suspend_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
retval);
+ rmi_disable_irq(rmi_dev, enable_wake);
return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);
-int rmi_driver_resume(struct rmi_device *rmi_dev)
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
int retval;
+ rmi_enable_irq(rmi_dev, clear_wake);
+
retval = rmi_resume_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
@@ -816,6 +986,9 @@ static int rmi_driver_remove(struct device *dev)
{
struct rmi_device *rmi_dev = to_rmi_device(dev);
+ rmi_disable_irq(rmi_dev, false);
+
+ rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
return 0;
@@ -842,15 +1015,95 @@ static inline int rmi_driver_of_probe(struct device *dev,
}
#endif
+int rmi_probe_interrupts(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ size_t size;
+ int retval;
+
+ /*
+ * We need to count the IRQs and allocate their storage before scanning
+ * the PDT and creating the function entries, because adding a new
+ * function can trigger events that result in the IRQ related storage
+ * being accessed.
+ */
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
+ irq_count = 0;
+ data->bootloader_mode = false;
+
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+ if (retval < 0) {
+ dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+ return retval;
+ }
+
+ if (data->bootloader_mode)
+ dev_warn(&rmi_dev->dev, "Device in bootloader mode.\n");
+
+ data->irq_count = irq_count;
+ data->num_of_irq_regs = (data->irq_count + 7) / 8;
+
+ size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
+ data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+ if (!data->irq_memory) {
+ dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ return -ENOMEM;
+ }
+
+ data->irq_status = data->irq_memory + size * 0;
+ data->fn_irq_bits = data->irq_memory + size * 1;
+ data->current_irq_mask = data->irq_memory + size * 2;
+ data->new_irq_mask = data->irq_memory + size * 3;
+
+ return retval;
+}
+
+int rmi_init_functions(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ int retval;
+
+ irq_count = 0;
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+ if (retval < 0) {
+ dev_err(dev, "Function creation failed with code %d.\n",
+ retval);
+ goto err_destroy_functions;
+ }
+
+ if (!data->f01_container) {
+ dev_err(dev, "Missing F01 container!\n");
+ retval = -EINVAL;
+ goto err_destroy_functions;
+ }
+
+ retval = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (retval < 0) {
+ dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+
+ return 0;
+
+err_destroy_functions:
+ rmi_free_function_list(rmi_dev);
+ return retval;
+}
+
static int rmi_driver_probe(struct device *dev)
{
struct rmi_driver *rmi_driver;
struct rmi_driver_data *data;
struct rmi_device_platform_data *pdata;
struct rmi_device *rmi_dev;
- size_t size;
- void *irq_memory;
- int irq_count;
int retval;
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
@@ -916,35 +1169,12 @@ static int rmi_driver_probe(struct device *dev)
PDT_PROPERTIES_LOCATION, retval);
}
- /*
- * We need to count the IRQs and allocate their storage before scanning
- * the PDT and creating the function entries, because adding a new
- * function can trigger events that result in the IRQ related storage
- * being accessed.
- */
- rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
- irq_count = 0;
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
- if (retval < 0) {
- dev_err(dev, "IRQ counting failed with code %d.\n", retval);
- goto err;
- }
- data->irq_count = irq_count;
- data->num_of_irq_regs = (data->irq_count + 7) / 8;
-
mutex_init(&data->irq_mutex);
+ mutex_init(&data->enabled_mutex);
- size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
- irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
- if (!irq_memory) {
- dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ retval = rmi_probe_interrupts(data);
+ if (retval)
goto err;
- }
-
- data->irq_status = irq_memory + size * 0;
- data->fn_irq_bits = irq_memory + size * 1;
- data->current_irq_mask = irq_memory + size * 2;
- data->new_irq_mask = irq_memory + size * 3;
if (rmi_dev->xport->input) {
/*
@@ -961,36 +1191,20 @@ static int rmi_driver_probe(struct device *dev)
dev_err(dev, "%s: Failed to allocate input device.\n",
__func__);
retval = -ENOMEM;
- goto err_destroy_functions;
+ goto err;
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
"%s/input0", dev_name(dev));
}
- irq_count = 0;
- rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
- if (retval < 0) {
- dev_err(dev, "Function creation failed with code %d.\n",
- retval);
- goto err_destroy_functions;
- }
-
- if (!data->f01_container) {
- dev_err(dev, "Missing F01 container!\n");
- retval = -EINVAL;
- goto err_destroy_functions;
- }
+ retval = rmi_init_functions(data);
+ if (retval)
+ goto err;
- retval = rmi_read_block(rmi_dev,
- data->f01_container->fd.control_base_addr + 1,
- data->current_irq_mask, data->num_of_irq_regs);
- if (retval < 0) {
- dev_err(dev, "%s: Failed to read current IRQ mask.\n",
- __func__);
- goto err_destroy_functions;
- }
+ retval = rmi_f34_create_sysfs(rmi_dev);
+ if (retval)
+ goto err;
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
@@ -1003,9 +1217,13 @@ static int rmi_driver_probe(struct device *dev)
}
}
+ retval = rmi_irq_init(rmi_dev);
+ if (retval < 0)
+ goto err_destroy_functions;
+
if (data->f01_container->dev.driver)
/* Driver already bound, so enable ATTN now. */
- return enable_sensor(rmi_dev);
+ return rmi_enable_sensor(rmi_dev);
return 0;
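The interrupt bookkeeping that rmi_probe_interrupts() now owns keeps all four IRQ bitmaps in a single devm allocation: with irq_count = 10 on a 64-bit kernel, BITS_TO_LONGS(10) is 1, size is 8 bytes, and one 32-byte buffer backs irq_status, fn_irq_bits, current_irq_mask and new_irq_mask. An illustrative sketch of that layout (the helper name is made up; the slicing mirrors the code above):

static int example_alloc_irq_masks(struct device *dev,
				   struct rmi_driver_data *data)
{
	size_t size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);

	/* One zeroed buffer, carved into four equally sized bitmaps. */
	data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
	if (!data->irq_memory)
		return -ENOMEM;

	data->irq_status       = data->irq_memory + size * 0;
	data->fn_irq_bits      = data->irq_memory + size * 1;
	data->current_irq_mask = data->irq_memory + size * 2;
	data->new_irq_mask     = data->irq_memory + size * 3;

	return 0;
}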
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 8dfbebe9bf86..24f8f764d171 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -51,9 +51,6 @@ struct pdt_entry {
u8 function_number;
};
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address);
-
#define RMI_REG_DESC_PRESENSE_BITS (32 * BITS_PER_BYTE)
#define RMI_REG_DESC_SUBPACKET_BITS (37 * BITS_PER_BYTE)
@@ -95,12 +92,40 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
bool rmi_is_physical_driver(struct device_driver *);
int rmi_register_physical_driver(void);
void rmi_unregister_physical_driver(void);
+void rmi_free_function_list(struct rmi_device *rmi_dev);
+int rmi_enable_sensor(struct rmi_device *rmi_dev);
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *entry));
+int rmi_probe_interrupts(struct rmi_driver_data *data);
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake);
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_init_functions(struct rmi_driver_data *data);
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt);
char *rmi_f01_get_product_ID(struct rmi_function *fn);
+#ifdef CONFIG_RMI4_F34
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev);
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev);
+#else
+static inline int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return 0;
+}
+
+static inline void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+}
+#endif /* CONFIG_RMI4_F34 */
+
extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f03_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f34_handler;
extern struct rmi_function_handler rmi_f54_handler;
+extern struct rmi_function_handler rmi_f55_handler;
#endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index b5d2dfc23bad..18baf4ceb940 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -62,6 +62,8 @@ struct f01_basic_properties {
#define RMI_F01_STATUS_CODE(status) ((status) & 0x0f)
/* The device has lost its configuration for some reason. */
#define RMI_F01_STATUS_UNCONFIGURED(status) (!!((status) & 0x80))
+/* The device is in bootloader mode */
+#define RMI_F01_STATUS_BOOTLOADER(status) ((status) & 0x40)
/* Control register bits */
@@ -326,12 +328,12 @@ static int rmi_f01_probe(struct rmi_function *fn)
}
switch (pdata->power_management.nosleep) {
- case RMI_F01_NOSLEEP_DEFAULT:
+ case RMI_REG_STATE_DEFAULT:
break;
- case RMI_F01_NOSLEEP_OFF:
+ case RMI_REG_STATE_OFF:
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
break;
- case RMI_F01_NOSLEEP_ON:
+ case RMI_REG_STATE_ON:
f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
break;
}
@@ -593,6 +595,10 @@ static int rmi_f01_attention(struct rmi_function *fn,
return error;
}
+ if (RMI_F01_STATUS_BOOTLOADER(device_status))
+ dev_warn(&fn->dev,
+ "Device in bootloader mode, please update firmware\n");
+
if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
dev_warn(&fn->dev, "Device reset detected.\n");
error = rmi_dev->driver->reset_handler(rmi_dev);
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
new file mode 100644
index 000000000000..8a7ca3e2f95e
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2015-2016 Red Hat
+ * Copyright (C) 2015 Lyude Paul <thatslyude@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/notifier.h>
+#include "rmi_driver.h"
+
+#define RMI_F03_RX_DATA_OFB 0x01
+#define RMI_F03_OB_SIZE 2
+
+#define RMI_F03_OB_OFFSET 2
+#define RMI_F03_OB_DATA_OFFSET 1
+#define RMI_F03_OB_FLAG_TIMEOUT BIT(6)
+#define RMI_F03_OB_FLAG_PARITY BIT(7)
+
+#define RMI_F03_DEVICE_COUNT 0x07
+#define RMI_F03_BYTES_PER_DEVICE 0x07
+#define RMI_F03_BYTES_PER_DEVICE_SHIFT 4
+#define RMI_F03_QUEUE_LENGTH 0x0F
+
+struct f03_data {
+ struct rmi_function *fn;
+
+ struct serio *serio;
+
+ u8 device_count;
+ u8 rx_queue_length;
+};
+
+static int rmi_f03_pt_write(struct serio *id, unsigned char val)
+{
+ struct f03_data *f03 = id->port_data;
+ int error;
+
+ rmi_dbg(RMI_DEBUG_FN, &f03->fn->dev,
+ "%s: Wrote %.2hhx to PS/2 passthrough address",
+ __func__, val);
+
+ error = rmi_write(f03->fn->rmi_dev, f03->fn->fd.data_base_addr, val);
+ if (error) {
+ dev_err(&f03->fn->dev,
+ "%s: Failed to write to F03 TX register (%d).\n",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_initialize(struct f03_data *f03)
+{
+ struct rmi_function *fn = f03->fn;
+ struct device *dev = &fn->dev;
+ int error;
+ u8 bytes_per_device;
+ u8 query1;
+ u8 query2[RMI_F03_DEVICE_COUNT * RMI_F03_BYTES_PER_DEVICE];
+ size_t query2_len;
+
+ error = rmi_read(fn->rmi_dev, fn->fd.query_base_addr, &query1);
+ if (error) {
+ dev_err(dev, "Failed to read query register (%d).\n", error);
+ return error;
+ }
+
+ f03->device_count = query1 & RMI_F03_DEVICE_COUNT;
+ bytes_per_device = (query1 >> RMI_F03_BYTES_PER_DEVICE_SHIFT) &
+ RMI_F03_BYTES_PER_DEVICE;
+
+ query2_len = f03->device_count * bytes_per_device;
+
+ /*
+ * The first generation of image sensors doesn't have a second part to
+ * its F03 query, so some of these values have to be set manually.
+ */
+ if (query2_len < 1) {
+ f03->device_count = 1;
+ f03->rx_queue_length = 7;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr + 1,
+ query2, query2_len);
+ if (error) {
+ dev_err(dev,
+ "Failed to read second set of query registers (%d).\n",
+ error);
+ return error;
+ }
+
+ f03->rx_queue_length = query2[0] & RMI_F03_QUEUE_LENGTH;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_register_pt(struct f03_data *f03)
+{
+ struct serio *serio;
+
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ serio->id.type = SERIO_8042;
+ serio->write = rmi_f03_pt_write;
+ serio->port_data = f03;
+
+ strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
+ sizeof(serio->name));
+ strlcpy(serio->phys, "synaptics-rmi4-pt/serio1",
+ sizeof(serio->phys));
+ serio->dev.parent = &f03->fn->dev;
+
+ f03->serio = serio;
+
+ serio_register_port(serio);
+
+ return 0;
+}
+
+static int rmi_f03_probe(struct rmi_function *fn)
+{
+ struct device *dev = &fn->dev;
+ struct f03_data *f03;
+ int error;
+
+ f03 = devm_kzalloc(dev, sizeof(struct f03_data), GFP_KERNEL);
+ if (!f03)
+ return -ENOMEM;
+
+ f03->fn = fn;
+
+ error = rmi_f03_initialize(f03);
+ if (error < 0)
+ return error;
+
+ if (f03->device_count != 1)
+ dev_warn(dev, "found %d devices on PS/2 passthrough",
+ f03->device_count);
+
+ dev_set_drvdata(dev, f03);
+
+ error = rmi_f03_register_pt(f03);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int rmi_f03_config(struct rmi_function *fn)
+{
+ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+ u16 data_addr = fn->fd.data_base_addr;
+ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
+ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
+ u8 ob_status;
+ u8 ob_data;
+ unsigned int serio_flags;
+ int i;
+ int error;
+
+ if (!rmi_dev)
+ return -ENODEV;
+
+ if (drvdata->attn_data.data) {
+ /* First grab the data passed by the transport device */
+ if (drvdata->attn_data.size < ob_len) {
+ dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
+ return 0;
+ }
+
+ memcpy(obs, drvdata->attn_data.data, ob_len);
+
+ drvdata->attn_data.data += ob_len;
+ drvdata->attn_data.size -= ob_len;
+ } else {
+ /* Grab all of the data registers, and check them for data */
+ error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
+ &obs, ob_len);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: Failed to read F03 output buffers: %d\n",
+ __func__, error);
+ serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
+ return error;
+ }
+ }
+
+ for (i = 0; i < ob_len; i += RMI_F03_OB_SIZE) {
+ ob_status = obs[i];
+ ob_data = obs[i + RMI_F03_OB_DATA_OFFSET];
+ serio_flags = 0;
+
+ if (!(ob_status & RMI_F03_RX_DATA_OFB))
+ continue;
+
+ if (ob_status & RMI_F03_OB_FLAG_TIMEOUT)
+ serio_flags |= SERIO_TIMEOUT;
+ if (ob_status & RMI_F03_OB_FLAG_PARITY)
+ serio_flags |= SERIO_PARITY;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: Received %.2hhx from PS2 guest T: %c P: %c\n",
+ __func__, ob_data,
+ serio_flags & SERIO_TIMEOUT ? 'Y' : 'N',
+ serio_flags & SERIO_PARITY ? 'Y' : 'N');
+
+ serio_interrupt(f03->serio, ob_data, serio_flags);
+ }
+
+ return 0;
+}
+
+static void rmi_f03_remove(struct rmi_function *fn)
+{
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+
+ serio_unregister_port(f03->serio);
+}
+
+struct rmi_function_handler rmi_f03_handler = {
+ .driver = {
+ .name = "rmi4_f03",
+ },
+ .func = 0x03,
+ .probe = rmi_f03_probe,
+ .config = rmi_f03_config,
+ .attention = rmi_f03_attention,
+ .remove = rmi_f03_remove,
+};
+
+MODULE_AUTHOR("Lyude Paul <thatslyude@gmail.com>");
+MODULE_DESCRIPTION("RMI F03 module");
+MODULE_LICENSE("GPL");
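Each entry in the F03 output buffer is a two-byte (status, data) pair; the attention handler above turns the status flags into serio flags before forwarding the data byte to the guest port. An illustrative helper with the same mapping (example_ob_to_serio_flags() is not part of the patch):

/* Illustrative only: translate one F03 output-buffer status byte into
 * the serio flags used when forwarding the guest byte.
 */
static unsigned int example_ob_to_serio_flags(u8 ob_status)
{
	unsigned int flags = 0;

	if (ob_status & RMI_F03_OB_FLAG_TIMEOUT)	/* BIT(6) */
		flags |= SERIO_TIMEOUT;
	if (ob_status & RMI_F03_OB_FLAG_PARITY)		/* BIT(7) */
		flags |= SERIO_PARITY;

	return flags;
}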
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f427a46f..bc5e37f30ac1 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -571,31 +571,48 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
static void rmi_f11_finger_handler(struct f11_data *f11,
struct rmi_2d_sensor *sensor,
- unsigned long *irq_bits, int num_irq_regs)
+ unsigned long *irq_bits, int num_irq_regs,
+ int size)
{
const u8 *f_state = f11->data.f_state;
u8 finger_state;
u8 i;
+ int abs_fingers;
+ int rel_fingers;
+ int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
num_irq_regs * 8);
int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
num_irq_regs * 8);
- for (i = 0; i < sensor->nbr_fingers; i++) {
- /* Possible of having 4 fingers per f_statet register */
- finger_state = rmi_f11_parse_finger_state(f_state, i);
- if (finger_state == F11_RESERVED) {
- pr_err("Invalid finger state[%d]: 0x%02x", i,
- finger_state);
- continue;
- }
+ if (abs_bits) {
+ if (abs_size > size)
+ abs_fingers = size / RMI_F11_ABS_BYTES;
+ else
+ abs_fingers = sensor->nbr_fingers;
+
+ for (i = 0; i < abs_fingers; i++) {
+ /* Up to 4 finger states are packed into each f_state register */
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED) {
+ pr_err("Invalid finger state[%d]: 0x%02x", i,
+ finger_state);
+ continue;
+ }
- if (abs_bits)
rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
finger_state, i);
+ }
+ }
+
+ if (rel_bits) {
+ if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+ rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+ else
+ rel_fingers = sensor->nbr_fingers;
- if (rel_bits)
+ for (i = 0; i < rel_fingers; i++)
rmi_f11_rel_pos_report(f11, i);
}
@@ -611,7 +628,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++) {
+ for (i = 0; i < abs_fingers; i++) {
finger_state = rmi_f11_parse_finger_state(f_state, i);
if (finger_state == F11_RESERVED)
/* no need to send twice the error */
@@ -1062,8 +1079,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
if (rc)
return rc;
- } else if (pdata->sensor_pdata) {
- f11->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f11->sensor_pdata = pdata->sensor_pdata;
}
f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
@@ -1124,6 +1141,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
sensor->dmax = f11->sensor_pdata.dmax;
+ sensor->dribble = f11->sensor_pdata.dribble;
+ sensor->palm_detect = f11->sensor_pdata.palm_detect;
if (f11->sens_query.has_physical_props) {
sensor->x_mm = f11->sens_query.x_sensor_size_mm;
@@ -1191,11 +1210,33 @@ static int rmi_f11_initialize(struct rmi_function *fn)
ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
sensor->axis_align.delta_y_threshold;
- if (f11->sens_query.has_dribble)
- ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+ if (f11->sens_query.has_dribble) {
+ switch (sensor->dribble) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[0] &= ~BIT(6);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[0] |= BIT(6);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
- if (f11->sens_query.has_palm_det)
- ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+ if (f11->sens_query.has_palm_det) {
+ switch (sensor->palm_detect) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[11] &= ~BIT(0);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[11] |= BIT(0);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
rc = f11_write_control_regs(fn, &f11->sens_query,
&f11->dev_controls, fn->fd.query_base_addr);
@@ -1241,12 +1282,21 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
u16 data_base_addr = fn->fd.data_base_addr;
int error;
+ int valid_bytes = f11->sensor.pkt_size;
- if (rmi_dev->xport->attn_data) {
- memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
- f11->sensor.attn_size);
- rmi_dev->xport->attn_data += f11->sensor.attn_size;
- rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+ if (drvdata->attn_data.data) {
+ /*
+ * The valid data in the attention report is less than
+ * expected. Only process the complete fingers.
+ */
+ if (f11->sensor.attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = f11->sensor.attn_size;
+ memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += f11->sensor.attn_size;
+ drvdata->attn_data.size -= f11->sensor.attn_size;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
@@ -1256,7 +1306,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
}
rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
- drvdata->num_of_irq_regs);
+ drvdata->num_of_irq_regs, valid_bytes);
return 0;
}
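The F11 attention path (and, below, F12 and F30) now clamps what it processes to the bytes actually present in the attention report instead of trusting the nominal packet size. The arithmetic reduces to a simple division; an illustrative helper, not from the patch:

/* Illustrative only: with 5-byte absolute finger records and a 12-byte
 * report, only 2 complete fingers can be processed.
 */
static int example_clamp_fingers(int nbr_fingers, int bytes_per_finger,
				 int report_size)
{
	int complete = report_size / bytes_per_finger;

	return complete < nbr_fingers ? complete : nbr_fingers;
}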
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 332c02f0b107..07aff4356fe0 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -26,9 +26,12 @@ enum rmi_f12_object_type {
RMI_F12_OBJECT_SMALL_OBJECT = 0x0D,
};
+#define F12_DATA1_BYTES_PER_OBJ 8
+
struct f12_data {
struct rmi_2d_sensor sensor;
struct rmi_2d_sensor_platform_data sensor_pdata;
+ bool has_dribble;
u16 data_addr;
@@ -68,10 +71,6 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
- int clip_x_low = 0;
- int clip_x_high = 0;
- int clip_y_low = 0;
- int clip_y_high = 0;
int rx_receivers = 0;
int tx_receivers = 0;
int sensor_flags = 0;
@@ -124,7 +123,9 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
}
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
- __func__, clip_x_low, clip_x_high, clip_y_low, clip_y_high);
+ __func__,
+ sensor->axis_align.clip_x_low, sensor->axis_align.clip_x_high,
+ sensor->axis_align.clip_y_low, sensor->axis_align.clip_y_high);
if (rmi_register_desc_has_subpacket(item, 3)) {
rx_receivers = buf[offset];
@@ -146,12 +147,16 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
return 0;
}
-static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
{
int i;
struct rmi_2d_sensor *sensor = &f12->sensor;
+ int objects = f12->data1->num_subpackets;
+
+ if ((f12->data1->num_subpackets * F12_DATA1_BYTES_PER_OBJ) > size)
+ objects = size / F12_DATA1_BYTES_PER_OBJ;
- for (i = 0; i < f12->data1->num_subpackets; i++) {
+ for (i = 0; i < objects; i++) {
struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
obj->type = RMI_2D_OBJECT_NONE;
@@ -182,7 +187,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
rmi_2d_sensor_abs_process(sensor, obj, i);
- data1 += 8;
+ data1 += F12_DATA1_BYTES_PER_OBJ;
}
if (sensor->kernel_tracking)
@@ -192,7 +197,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++)
+ for (i = 0; i < objects; i++)
rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
}
@@ -201,14 +206,20 @@ static int rmi_f12_attention(struct rmi_function *fn,
{
int retval;
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f12_data *f12 = dev_get_drvdata(&fn->dev);
struct rmi_2d_sensor *sensor = &f12->sensor;
-
- if (rmi_dev->xport->attn_data) {
- memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
- sensor->attn_size);
- rmi_dev->xport->attn_data += sensor->attn_size;
- rmi_dev->xport->attn_size -= sensor->attn_size;
+ int valid_bytes = sensor->pkt_size;
+
+ if (drvdata->attn_data.data) {
+ if (sensor->attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = sensor->attn_size;
+ memcpy(sensor->data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += sensor->attn_size;
+ drvdata->attn_data.size -= sensor->attn_size;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -221,19 +232,83 @@ static int rmi_f12_attention(struct rmi_function *fn,
if (f12->data1)
rmi_f12_process_objects(f12,
- &sensor->data_pkt[f12->data1_offset]);
+ &sensor->data_pkt[f12->data1_offset], valid_bytes);
input_mt_sync_frame(sensor->input);
return 0;
}
+static int rmi_f12_write_control_regs(struct rmi_function *fn)
+{
+ int ret;
+ const struct rmi_register_desc_item *item;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ int control_size;
+ char buf[3];
+ u16 control_offset = 0;
+ u8 subpacket_offset = 0;
+
+ if (f12->has_dribble
+ && (f12->sensor.dribble != RMI_REG_STATE_DEFAULT)) {
+ item = rmi_get_register_desc_item(&f12->control_reg_desc, 20);
+ if (item) {
+ control_offset = rmi_register_desc_calc_reg_offset(
+ &f12->control_reg_desc, 20);
+
+ /*
+ * The byte containing the EnableDribble bit will be
+ * in either byte 0 or byte 2 of control 20, depending
+ * on the existence of subpacket 0. If control 20 is
+ * larger than 3 bytes, just read the first 3.
+ */
+ control_size = min(item->reg_size, 3UL);
+
+ ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr
+ + control_offset, buf, control_size);
+ if (ret)
+ return ret;
+
+ if (rmi_register_desc_has_subpacket(item, 0))
+ subpacket_offset += 1;
+
+ switch (f12->sensor.dribble) {
+ case RMI_REG_STATE_OFF:
+ buf[subpacket_offset] &= ~BIT(2);
+ break;
+ case RMI_REG_STATE_ON:
+ buf[subpacket_offset] |= BIT(2);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.control_base_addr + control_offset,
+ buf, control_size);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+
+}
+
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ int ret;
drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ ret = rmi_f12_write_control_regs(fn);
+ if (ret)
+ dev_warn(&fn->dev,
+ "Failed to write F12 control registers: %d\n", ret);
+
return 0;
}
@@ -247,7 +322,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
const struct rmi_register_desc_item *item;
struct rmi_2d_sensor *sensor;
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
- struct rmi_transport_dev *xport = rmi_dev->xport;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
@@ -260,7 +335,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
++query_addr;
- if (!(buf & 0x1)) {
+ if (!(buf & BIT(0))) {
dev_err(&fn->dev,
"Behavior of F12 without register descriptors is undefined.\n");
return -ENODEV;
@@ -270,12 +345,14 @@ static int rmi_f12_probe(struct rmi_function *fn)
if (!f12)
return -ENOMEM;
+ f12->has_dribble = !!(buf & BIT(3));
+
if (fn->dev.of_node) {
ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
if (ret)
return ret;
- } else if (pdata->sensor_pdata) {
- f12->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f12->sensor_pdata = pdata->sensor_pdata;
}
ret = rmi_read_register_desc(rmi_dev, query_addr,
@@ -318,6 +395,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
sensor->x_mm = f12->sensor_pdata.x_mm;
sensor->y_mm = f12->sensor_pdata.y_mm;
+ sensor->dribble = f12->sensor_pdata.dribble;
if (sensor->sensor_type == rmi_sensor_default)
sensor->sensor_type =
@@ -343,7 +421,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
* HID attention reports.
*/
item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
@@ -357,15 +435,15 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
@@ -377,22 +455,22 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data6 = item;
f12->data6_offset = data_offset;
data_offset += item->reg_size;
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data9 = item;
f12->data9_offset = data_offset;
data_offset += item->reg_size;
@@ -401,27 +479,27 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data15 = item;
f12->data15_offset = data_offset;
data_offset += item->reg_size;
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 760aff1bc420..f4b491e3e0fd 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -99,6 +99,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
{
struct f30_data *f30 = dev_get_drvdata(&fn->dev);
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
int retval;
int gpiled = 0;
int value = 0;
@@ -109,11 +110,15 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
return 0;
/* Read the gpi led data. */
- if (rmi_dev->xport->attn_data) {
- memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f30->register_count) {
+ dev_warn(&fn->dev, "F30 interrupted, but data is missing\n");
+ return 0;
+ }
+ memcpy(f30->data_regs, drvdata->attn_data.data,
f30->register_count);
- rmi_dev->xport->attn_data += f30->register_count;
- rmi_dev->xport->attn_size -= f30->register_count;
+ drvdata->attn_data.data += f30->register_count;
+ drvdata->attn_data.size -= f30->register_count;
} else {
retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
f30->data_regs, f30->register_count);
@@ -192,7 +197,7 @@ static int rmi_f30_config(struct rmi_function *fn)
rmi_get_platform_data(fn->rmi_dev);
int error;
- if (pdata->f30_data && pdata->f30_data->disable) {
+ if (pdata->f30_data.disable) {
drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
} else {
/* Write Control Register values back to device */
@@ -351,7 +356,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
f30->gpioled_key_map = (u16 *)map_memory;
pdata = rmi_get_platform_data(rmi_dev);
- if (pdata && f30->has_gpio) {
+ if (f30->has_gpio) {
button = BTN_LEFT;
for (i = 0; i < f30->gpioled_count; i++) {
if (rmi_f30_is_valid_button(i, f30->ctrl)) {
@@ -362,8 +367,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
* f30->has_mech_mouse_btns, but I am
* not sure, so use only the pdata info
*/
- if (pdata->f30_data &&
- pdata->f30_data->buttonpad)
+ if (pdata->f30_data.buttonpad)
break;
}
}
@@ -378,7 +382,7 @@ static int rmi_f30_probe(struct rmi_function *fn)
const struct rmi_device_platform_data *pdata =
rmi_get_platform_data(fn->rmi_dev);
- if (pdata->f30_data && pdata->f30_data->disable)
+ if (pdata->f30_data.disable)
return 0;
rc = rmi_f30_initialize(fn);
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
new file mode 100644
index 000000000000..9774dfbab9bb
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34_write_bootloader_id(struct f34_data *f34)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u8 bootloader_id[F34_BOOTLOADER_ID_LEN];
+ int ret;
+
+ ret = rmi_read_block(rmi_dev, fn->fd.query_base_addr,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Reading bootloader ID failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: writing bootloader id '%c%c'\n",
+ __func__, bootloader_id[0], bootloader_id[1]);
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write bootloader ID: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_command(struct f34_data *f34, u8 command,
+ unsigned int timeout, bool write_bl_id)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int ret;
+
+ if (write_bl_id) {
+ ret = rmi_f34_write_bootloader_id(f34);
+ if (ret)
+ return ret;
+ }
+
+ init_completion(&f34->v5.cmd_done);
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read cmd register: %d (command %#02x)\n",
+ __func__, ret, command);
+ return ret;
+ }
+
+ f34->v5.status |= command & 0x0f;
+
+ ret = rmi_write(rmi_dev, f34->v5.ctrl_address, f34->v5.status);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "Failed to write F34 command %#02x: %d\n",
+ command, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&f34->v5.cmd_done,
+ msecs_to_jiffies(timeout))) {
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out: %d\n",
+ __func__, command, ret);
+ return ret;
+ }
+
+ if (f34->v5.status & 0x7f) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out, status: %#02x\n",
+ __func__, command, f34->v5.status);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct f34_data *f34 = dev_get_drvdata(&fn->dev);
+ int ret;
+
+ if (f34->bl_version != 5)
+ return 0;
+
+ ret = rmi_read(f34->fn->rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+ __func__, f34->v5.status, ret);
+
+ if (!ret && !(f34->v5.status & 0x7f))
+ complete(&f34->v5.cmd_done);
+
+ return 0;
+}
+
+static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
+ int block_count, u8 command)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u16 address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET;
+ u8 start_address[] = { 0, 0 };
+ int i;
+ int ret;
+
+ ret = rmi_write_block(rmi_dev, fn->fd.data_base_addr,
+ start_address, sizeof(start_address));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write initial zeros: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < block_count; i++) {
+ ret = rmi_write_block(rmi_dev, address,
+ data, f34->v5.block_size);
+ if (ret) {
+ dev_err(&fn->dev,
+ "failed to write block #%d: %d\n", i, ret);
+ return ret;
+ }
+
+ ret = rmi_f34_command(f34, command, F34_IDLE_WAIT_MS, false);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to write command for block #%d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "wrote block %d of %d\n",
+ i + 1, block_count);
+
+ data += f34->v5.block_size;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_write_firmware(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.fw_blocks,
+ F34_WRITE_FW_BLOCK);
+}
+
+static int rmi_f34_write_config(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.config_blocks,
+ F34_WRITE_CONFIG_BLOCK);
+}
+
+int rmi_f34_enable_flash(struct f34_data *f34)
+{
+ return rmi_f34_command(f34, F34_ENABLE_FLASH_PROG,
+ F34_ENABLE_WAIT_MS, true);
+}
+
+static int rmi_f34_flash_firmware(struct f34_data *f34,
+ const struct rmi_f34_firmware *syn_fw)
+{
+ struct rmi_function *fn = f34->fn;
+ int ret;
+
+ if (syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing firmware...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_ALL,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+
+ dev_info(&fn->dev, "Writing firmware (%d bytes)...\n",
+ syn_fw->image_size);
+ ret = rmi_f34_write_firmware(f34, syn_fw->data);
+ if (ret)
+ return ret;
+ }
+
+ if (syn_fw->config_size) {
+ /*
+ * We only need to erase config if we haven't updated
+ * firmware.
+ */
+ if (!syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing config...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_CONFIG,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+ }
+
+ dev_info(&fn->dev, "Writing config (%d bytes)...\n",
+ syn_fw->config_size);
+ ret = rmi_f34_write_config(f34,
+ &syn_fw->data[syn_fw->image_size]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int rmi_f34_update_firmware(struct f34_data *f34, const struct firmware *fw)
+{
+ const struct rmi_f34_firmware *syn_fw;
+ int ret;
+
+ syn_fw = (const struct rmi_f34_firmware *)fw->data;
+ BUILD_BUG_ON(offsetof(struct rmi_f34_firmware, data) !=
+ F34_FW_IMAGE_OFFSET);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW size:%d, checksum:%08x, image_size:%d, config_size:%d\n",
+ (int)fw->size,
+ le32_to_cpu(syn_fw->checksum),
+ le32_to_cpu(syn_fw->image_size),
+ le32_to_cpu(syn_fw->config_size));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW bootloader_id:%02x, product_id:%.*s, info: %02x%02x\n",
+ syn_fw->bootloader_version,
+ (int)sizeof(syn_fw->product_id), syn_fw->product_id,
+ syn_fw->product_info[0], syn_fw->product_info[1]);
+
+ if (syn_fw->image_size &&
+ syn_fw->image_size != f34->v5.fw_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: fw size %d, expected %d\n",
+ syn_fw->image_size,
+ f34->v5.fw_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->config_size &&
+ syn_fw->config_size != f34->v5.config_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: config size %d, expected %d\n",
+ syn_fw->config_size,
+ f34->v5.config_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->image_size && !syn_fw->config_size) {
+ dev_err(&f34->fn->dev, "Bad firmware image: no config data\n");
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+ mutex_lock(&f34->v5.flash_mutex);
+
+ ret = rmi_f34_flash_firmware(f34, syn_fw);
+
+ mutex_unlock(&f34->v5.flash_mutex);
+
+out:
+ return ret;
+}
+
+static int rmi_firmware_update(struct rmi_driver_data *data,
+ const struct firmware *fw)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ struct f34_data *f34;
+ int ret;
+
+ if (!data->f34_container) {
+ dev_warn(dev, "%s: No F34 present!\n", __func__);
+ return -EINVAL;
+ }
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ if (f34->bl_version == 7) {
+ if (data->pdt_props & HAS_BSR) {
+ dev_err(dev, "%s: LTS not supported\n", __func__);
+ return -ENODEV;
+ }
+ } else if (f34->bl_version != 5) {
+ dev_warn(dev, "F34 V%d not supported!\n",
+ data->f34_container->fd.function_version);
+ return -ENODEV;
+ }
+
+ /* Enter flash mode */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_start_reflash(f34, fw);
+ else
+ ret = rmi_f34_enable_flash(f34);
+ if (ret)
+ return ret;
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Tear down functions and re-probe */
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ if (!data->bootloader_mode || !data->f34_container) {
+ dev_warn(dev, "%s: No F34 present or not in bootloader!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_enable_irq(rmi_dev, false);
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ /* Perform firmware update */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_do_reflash(f34, fw);
+ else
+ ret = rmi_f34_update_firmware(f34, fw);
+
+ dev_info(&f34->fn->dev, "Firmware update complete, status:%d\n", ret);
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Re-probe */
+ rmi_dbg(RMI_DEBUG_FN, dev, "Re-probing device\n");
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+ if (ret < 0)
+ dev_warn(dev, "RMI reset failed!\n");
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ rmi_enable_irq(rmi_dev, false);
+
+ if (data->f01_container->dev.driver)
+ /* Driver already bound, so enable ATTN now. */
+ return rmi_enable_sensor(rmi_dev);
+
+ rmi_dbg(RMI_DEBUG_FN, dev, "%s complete\n", __func__);
+
+ return ret;
+}
+
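+/*
+ * Handler for the write-only 'update_fw' sysfs attribute. Writing a
+ * firmware file name triggers a reflash; the name is resolved via
+ * request_firmware(), i.e. relative to the firmware search path
+ * (typically /lib/firmware). Illustrative example (the exact sysfs
+ * path depends on the transport and device name):
+ *
+ *   echo -n synaptics.img > /sys/bus/rmi4/devices/<device>/update_fw
+ */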
+static ssize_t rmi_driver_update_fw_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(dev);
+ char fw_name[NAME_MAX];
+ const struct firmware *fw;
+ size_t copy_count = count;
+ int ret;
+
+ if (count == 0 || count >= NAME_MAX)
+ return -EINVAL;
+
+ if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
+ copy_count -= 1;
+
+ strncpy(fw_name, buf, copy_count);
+ fw_name[copy_count] = '\0';
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Flashing %s\n", fw_name);
+
+ ret = rmi_firmware_update(data, fw);
+
+ release_firmware(fw);
+
+ return ret ?: count;
+}
+
+static DEVICE_ATTR(update_fw, 0200, NULL, rmi_driver_update_fw_store);
+
+static struct attribute *rmi_firmware_attrs[] = {
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static struct attribute_group rmi_firmware_attr_group = {
+ .attrs = rmi_firmware_attrs,
+};
+
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ unsigned char f34_queries[9];
+ bool has_config_id;
+ u8 version = fn->fd.function_version;
+ int ret;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+ dev_set_drvdata(&fn->dev, f34);
+
+	/*
+	 * The v5 code only supports function version 0; defer anything
+	 * newer to the V7 probe.
+	 */
+	if (version > 0)
+		return rmi_f34v7_probe(f34);
+
+ f34->bl_version = 5;
+
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Failed to query properties\n",
+ __func__);
+ return ret;
+ }
+
+ snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
+ "%c%c", f34_queries[0], f34_queries[1]);
+
+ mutex_init(&f34->v5.flash_mutex);
+ init_completion(&f34->v5.cmd_done);
+
+ f34->v5.block_size = get_unaligned_le16(&f34_queries[3]);
+ f34->v5.fw_blocks = get_unaligned_le16(&f34_queries[5]);
+ f34->v5.config_blocks = get_unaligned_le16(&f34_queries[7]);
+ f34->v5.ctrl_address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET +
+ f34->v5.block_size;
+ has_config_id = f34_queries[2] & (1 << 2);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Bootloader ID: %s\n",
+ f34->bootloader_id);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Block size: %d\n",
+ f34->v5.block_size);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "FW blocks: %d\n",
+ f34->v5.fw_blocks);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "CFG blocks: %d\n",
+ f34->v5.config_blocks);
+
+ if (has_config_id) {
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to read F34 config ID\n");
+ return ret;
+ }
+
+ snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+ "%02x%02x%02x%02x",
+ f34_queries[0], f34_queries[1],
+ f34_queries[2], f34_queries[3]);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ return 0;
+}
+
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+ sysfs_remove_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+struct rmi_function_handler rmi_f34_handler = {
+ .driver = {
+ .name = "rmi4_f34",
+ },
+ .func = 0x34,
+ .probe = rmi_f34_probe,
+ .attention = rmi_f34_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
new file mode 100644
index 000000000000..2c21056dc375
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_F34_H
+#define _RMI_F34_H
+
+/* F34 image file offsets. */
+#define F34_FW_IMAGE_OFFSET 0x100
+
+/* F34 register offsets. */
+#define F34_BLOCK_DATA_OFFSET 2
+
+/* F34 commands */
+#define F34_WRITE_FW_BLOCK 0x2
+#define F34_ERASE_ALL 0x3
+#define F34_READ_CONFIG_BLOCK 0x5
+#define F34_WRITE_CONFIG_BLOCK 0x6
+#define F34_ERASE_CONFIG 0x7
+#define F34_ENABLE_FLASH_PROG 0xf
+
+#define F34_STATUS_IN_PROGRESS 0xff
+#define F34_STATUS_IDLE 0x80
+
+#define F34_IDLE_WAIT_MS 500
+#define F34_ENABLE_WAIT_MS 300
+#define F34_ERASE_WAIT_MS 5000
+
+#define F34_BOOTLOADER_ID_LEN 2
+
+/* F34 V7 defines */
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+#define V7_BOOTLOADER_ID_OFFSET 1
+
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define CONFIG_ID_SIZE 32
+#define PRODUCT_ID_SIZE 10
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define HAS_BSR BIT(5)
+#define HAS_CONFIG_ID BIT(3)
+#define HAS_GUEST_CODE BIT(6)
+#define HAS_DISP_CFG BIT(5)
+
+/* F34 V7 commands */
+#define CMD_V7_IDLE 0
+#define CMD_V7_ENTER_BL 1
+#define CMD_V7_READ 2
+#define CMD_V7_WRITE 3
+#define CMD_V7_ERASE 4
+#define CMD_V7_ERASE_AP 5
+#define CMD_V7_SENSOR_ID 6
+
+#define v7_CMD_IDLE 0
+#define v7_CMD_WRITE_FW 1
+#define v7_CMD_WRITE_CONFIG 2
+#define v7_CMD_WRITE_LOCKDOWN 3
+#define v7_CMD_WRITE_GUEST_CODE 4
+#define v7_CMD_READ_CONFIG 5
+#define v7_CMD_ERASE_ALL 6
+#define v7_CMD_ERASE_UI_FIRMWARE 7
+#define v7_CMD_ERASE_UI_CONFIG 8
+#define v7_CMD_ERASE_BL_CONFIG 9
+#define v7_CMD_ERASE_DISP_CONFIG 10
+#define v7_CMD_ERASE_FLASH_CONFIG 11
+#define v7_CMD_ERASE_GUEST_CODE 12
+#define v7_CMD_ENABLE_FLASH_PROG 13
+
+#define v7_UI_CONFIG_AREA 0
+#define v7_PM_CONFIG_AREA 1
+#define v7_BL_CONFIG_AREA 2
+#define v7_DP_CONFIG_AREA 3
+#define v7_FLASH_CONFIG_AREA 4
+
+/* F34 V7 partition IDs */
+#define BOOTLOADER_PARTITION 1
+#define DEVICE_CONFIG_PARTITION 2
+#define FLASH_CONFIG_PARTITION 3
+#define MANUFACTURING_BLOCK_PARTITION 4
+#define GUEST_SERIALIZATION_PARTITION 5
+#define GLOBAL_PARAMETERS_PARTITION 6
+#define CORE_CODE_PARTITION 7
+#define CORE_CONFIG_PARTITION 8
+#define GUEST_CODE_PARTITION 9
+#define DISPLAY_CONFIG_PARTITION 10
+
+/* F34 V7 container IDs */
+#define TOP_LEVEL_CONTAINER 0
+#define UI_CONTAINER 1
+#define UI_CONFIG_CONTAINER 2
+#define BL_CONTAINER 3
+#define BL_IMAGE_CONTAINER 4
+#define BL_CONFIG_CONTAINER 5
+#define BL_LOCKDOWN_INFO_CONTAINER 6
+#define PERMANENT_CONFIG_CONTAINER 7
+#define GUEST_CODE_CONTAINER 8
+#define BL_PROTOCOL_DESCRIPTOR_CONTAINER 9
+#define UI_PROTOCOL_DESCRIPTOR_CONTAINER 10
+#define RMI_SELF_DISCOVERY_CONTAINER 11
+#define RMI_PAGE_CONTENT_CONTAINER 12
+#define GENERAL_INFORMATION_CONTAINER 13
+#define DEVICE_CONFIG_CONTAINER 14
+#define FLASH_CONFIG_CONTAINER 15
+#define GUEST_SERIALIZATION_CONTAINER 16
+#define GLOBAL_PARAMETERS_CONTAINER 17
+#define CORE_CODE_CONTAINER 18
+#define CORE_CONFIG_CONTAINER 19
+#define DISPLAY_CONFIG_CONTAINER 20
+
+struct f34v7_query_1_7 {
+ u8 bl_minor_revision; /* query 1 */
+ u8 bl_major_revision;
+ __le32 bl_fw_id; /* query 2 */
+ u8 minimum_write_size; /* query 3 */
+ __le16 block_size;
+ __le16 flash_page_size;
+ __le16 adjustable_partition_area_size; /* query 4 */
+ __le16 flash_config_length; /* query 5 */
+ __le16 payload_length; /* query 6 */
+ u8 partition_support[4]; /* query 7 */
+} __packed;
+
+struct f34v7_data_1_5 {
+ u8 partition_id;
+ __le16 block_offset;
+ __le16 transfer_length;
+ u8 command;
+ u8 payload[2];
+} __packed;
+
+struct block_data {
+ const void *data;
+ int size;
+};
+
+struct partition_table {
+ u8 partition_id;
+ u8 byte_1_reserved;
+ __le16 partition_length;
+ __le16 start_physical_address;
+ __le16 partition_properties;
+} __packed;
+
+struct physical_address {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 guest_code;
+};
+
+struct container_descriptor {
+ __le32 content_checksum;
+ __le16 container_id;
+ u8 minor_version;
+ u8 major_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ u8 container_option_flags[4];
+ __le32 content_options_length;
+ __le32 content_options_address;
+ __le32 content_length;
+ __le32 content_address;
+} __packed;
+
+struct block_count {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 fl_config;
+ u16 pm_config;
+ u16 bl_config;
+ u16 lockdown;
+ u16 guest_code;
+};
+
+struct image_header_10 {
+ __le32 checksum;
+ u8 reserved_04;
+ u8 reserved_05;
+ u8 minor_header_version;
+ u8 major_header_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ __le32 top_level_container_start_addr;
+};
+
+struct image_metadata {
+ bool contains_firmware_id;
+ bool contains_bootloader;
+ bool contains_display_cfg;
+ bool contains_guest_code;
+ bool contains_flash_config;
+ unsigned int firmware_id;
+ unsigned int checksum;
+ unsigned int bootloader_size;
+ unsigned int display_cfg_offset;
+ unsigned char bl_version;
+ unsigned char product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+ struct block_data bootloader;
+ struct block_data ui_firmware;
+ struct block_data ui_config;
+ struct block_data dp_config;
+ struct block_data fl_config;
+ struct block_data bl_config;
+ struct block_data guest_code;
+ struct block_data lockdown;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+};
+
+struct register_offset {
+ u8 properties;
+ u8 properties_2;
+ u8 block_size;
+ u8 block_count;
+ u8 gc_block_count;
+ u8 flash_status;
+ u8 partition_id;
+ u8 block_number;
+ u8 transfer_length;
+ u8 flash_cmd;
+ u8 payload;
+};
+
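+/*
+ * Layout of a firmware image file as consumed by the V5 code: a
+ * 0x100 byte header (everything before 'data', see F34_FW_IMAGE_OFFSET),
+ * followed by image_size bytes of firmware data and then config_size
+ * bytes of configuration data. Both sizes are expected to match the
+ * block counts reported by the device (blocks * block_size).
+ */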
+struct rmi_f34_firmware {
+ __le32 checksum;
+ u8 pad1[3];
+ u8 bootloader_version;
+ __le32 image_size;
+ __le32 config_size;
+ u8 product_id[10];
+ u8 product_info[2];
+ u8 pad2[228];
+ u8 data[];
+};
+
+struct f34v5_data {
+ u16 block_size;
+ u16 fw_blocks;
+ u16 config_blocks;
+ u16 ctrl_address;
+ u8 status;
+
+ struct completion cmd_done;
+ struct mutex flash_mutex;
+};
+
+struct f34v7_data {
+ bool has_display_cfg;
+ bool has_guest_code;
+ bool force_update;
+ bool in_bl_mode;
+ u8 *read_config_buf;
+ size_t read_config_buf_size;
+ u8 command;
+ u8 flash_status;
+ u16 block_size;
+ u16 config_block_count;
+ u16 config_size;
+ u16 config_area;
+ u16 flash_config_length;
+ u16 payload_length;
+ u8 partitions;
+ u16 partition_table_bytes;
+ bool new_partition_table;
+
+ struct register_offset off;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+ struct image_metadata img;
+
+ const void *config_data;
+ const void *image;
+};
+
+struct f34_data {
+ struct rmi_function *fn;
+
+ u8 bl_version;
+ unsigned char bootloader_id[5];
+ unsigned char configuration_id[CONFIG_ID_SIZE*2 + 1];
+
+ union {
+ struct f34v5_data v5;
+ struct f34v7_data v7;
+ };
+};
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_probe(struct f34_data *f34);
+
+#endif /* _RMI_F34_H */
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
new file mode 100644
index 000000000000..ca31f9539d9b
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) 2016, Zodiac Inflight Innovations
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34v7_read_flash_status(struct f34_data *f34)
+{
+ u8 status;
+ u8 command;
+ int ret;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+ &status,
+ sizeof(status));
+ if (ret < 0) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Failed to read flash status\n", __func__);
+ return ret;
+ }
+
+ f34->v7.in_bl_mode = status >> 7;
+ f34->v7.flash_status = status & 0x1f;
+
+ if (f34->v7.flash_status != 0x00) {
+ dev_err(&f34->fn->dev, "%s: status=%d, command=0x%02x\n",
+ __func__, f34->v7.flash_status, f34->v7.command);
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read flash command\n",
+ __func__);
+ return ret;
+ }
+
+ f34->v7.command = command;
+
+ return 0;
+}
+
+static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
+{
+ int count = 0;
+ int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+
+ do {
+ usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+ count++;
+
+ rmi_f34v7_read_flash_status(f34);
+
+ if ((f34->v7.command == v7_CMD_IDLE)
+ && (f34->v7.flash_status == 0x00)) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "Idle status detected\n");
+ return 0;
+ }
+ } while (count < timeout_count);
+
+ dev_err(&f34->fn->dev,
+ "%s: Timed out waiting for idle status\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
+ u8 cmd)
+{
+ int ret;
+ u8 base;
+ struct f34v7_data_1_5 data_1_5;
+
+ base = f34->fn->fd.data_base_addr;
+
+ memset(&data_1_5, 0, sizeof(data_1_5));
+
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ data_1_5.partition_id = CORE_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ data_1_5.partition_id = GUEST_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ data_1_5.partition_id = BOOTLOADER_PARTITION;
+ data_1_5.command = CMD_V7_ENTER_BL;
+ break;
+ }
+
+ data_1_5.payload[0] = f34->bootloader_id[0];
+ data_1_5.payload[1] = f34->bootloader_id[1];
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &data_1_5, sizeof(data_1_5));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to write single transaction command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 command;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_WRITE_GUEST_CODE:
+ command = CMD_V7_WRITE;
+ break;
+ case v7_CMD_READ_CONFIG:
+ command = CMD_V7_READ;
+ break;
+ case v7_CMD_ERASE_ALL:
+ command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ command = CMD_V7_ENTER_BL;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ f34->v7.command = command;
+
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ case v7_CMD_ENABLE_FLASH_PROG:
+ ret = rmi_f34v7_write_command_single_transaction(f34, cmd);
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
+ default:
+ break;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: writing cmd %02X\n",
+ __func__, command);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.flash_cmd,
+ &command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 partition;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_READ_CONFIG:
+ if (f34->v7.config_area == v7_UI_CONFIG_AREA)
+ partition = CORE_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_DP_CONFIG_AREA)
+ partition = DISPLAY_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_PM_CONFIG_AREA)
+ partition = GUEST_SERIALIZATION_PARTITION;
+ else if (f34->v7.config_area == v7_BL_CONFIG_AREA)
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ else if (f34->v7.config_area == v7_FLASH_CONFIG_AREA)
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case v7_CMD_WRITE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_ALL:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ partition = CORE_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ partition = DISPLAY_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ partition = BOOTLOADER_PARTITION;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &partition, sizeof(partition));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_partition_table(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+
+ ret = rmi_f34v7_write_partition_id(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ put_unaligned_le16(f34->v7.flash_config_length, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write command\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, WRITE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to wait for idle status\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ f34->v7.read_config_buf,
+ f34->v7.partition_table_bytes);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read block data\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
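+/*
+ * The partition table read back from the device appears to consist of
+ * a two byte header followed by one 8 byte struct partition_table
+ * entry per partition (hence partition_table_bytes = partitions * 8 + 2
+ * and the i * 8 + 2 indexing below).
+ */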
+static void rmi_f34v7_parse_partition_table(struct f34_data *f34,
+ const void *partition_table,
+ struct block_count *blkcount,
+ struct physical_address *phyaddr)
+{
+ int i;
+ int index;
+ u16 partition_length;
+ u16 physical_address;
+ const struct partition_table *ptable;
+
+ for (i = 0; i < f34->v7.partitions; i++) {
+ index = i * 8 + 2;
+ ptable = partition_table + index;
+ partition_length = le16_to_cpu(ptable->partition_length);
+ physical_address = le16_to_cpu(ptable->start_physical_address);
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Partition entry %d: %*ph\n",
+			__func__, i, (int)sizeof(struct partition_table), ptable);
+ switch (ptable->partition_id & 0x1f) {
+ case CORE_CODE_PARTITION:
+ blkcount->ui_firmware = partition_length;
+ phyaddr->ui_firmware = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core code block count: %d\n",
+ __func__, blkcount->ui_firmware);
+ break;
+ case CORE_CONFIG_PARTITION:
+ blkcount->ui_config = partition_length;
+ phyaddr->ui_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core config block count: %d\n",
+ __func__, blkcount->ui_config);
+ break;
+ case DISPLAY_CONFIG_PARTITION:
+ blkcount->dp_config = partition_length;
+ phyaddr->dp_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Display config block count: %d\n",
+ __func__, blkcount->dp_config);
+ break;
+ case FLASH_CONFIG_PARTITION:
+ blkcount->fl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Flash config block count: %d\n",
+ __func__, blkcount->fl_config);
+ break;
+ case GUEST_CODE_PARTITION:
+ blkcount->guest_code = partition_length;
+ phyaddr->guest_code = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest code block count: %d\n",
+ __func__, blkcount->guest_code);
+ break;
+ case GUEST_SERIALIZATION_PARTITION:
+ blkcount->pm_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest serialization block count: %d\n",
+ __func__, blkcount->pm_config);
+ break;
+ case GLOBAL_PARAMETERS_PARTITION:
+ blkcount->bl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Global parameters block count: %d\n",
+ __func__, blkcount->bl_config);
+ break;
+ case DEVICE_CONFIG_PARTITION:
+ blkcount->lockdown = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Device config block count: %d\n",
+ __func__, blkcount->lockdown);
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ int offset;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x7) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Bootloader V%d.%d\n",
+ f34->bootloader_id[1], f34->bootloader_id[0]);
+
+ return 0;
+}
+
+static int rmi_f34v7_read_queries(struct f34_data *f34)
+{
+ int ret;
+ int i, j;
+ u8 base;
+ int offset;
+ u8 *ptable;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x07) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ f34->v7.block_size = le16_to_cpu(query_1_7.block_size);
+ f34->v7.flash_config_length =
+ le16_to_cpu(query_1_7.flash_config_length);
+ f34->v7.payload_length = le16_to_cpu(query_1_7.payload_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
+ __func__, f34->v7.block_size);
+
+ f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
+ f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
+ f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
+ f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+ f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
+ f34->v7.off.payload = V7_PAYLOAD_OFFSET;
+
+ f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
+ f34->v7.has_guest_code =
+ query_1_7.partition_support[1] & HAS_GUEST_CODE;
+
+ if (query_0 & HAS_CONFIG_ID) {
+		u8 f34_ctrl[CONFIG_ID_SIZE];
+ int i = 0;
+ u8 *p = f34->configuration_id;
+ *p = '\0';
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.control_base_addr,
+ f34_ctrl,
+ sizeof(f34_ctrl));
+ if (ret)
+ return ret;
+
+ /* Eat leading zeros */
+ while (i < sizeof(f34_ctrl) && !f34_ctrl[i])
+ i++;
+
+ for (; i < sizeof(f34_ctrl); i++)
+ p += snprintf(p, f34->configuration_id
+ + sizeof(f34->configuration_id) - p,
+ "%02X", f34_ctrl[i]);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ f34->v7.partitions = 0;
+ for (i = 0; i < sizeof(query_1_7.partition_support); i++)
+ for (j = 0; j < 8; j++)
+ if (query_1_7.partition_support[i] & (1 << j))
+ f34->v7.partitions++;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n",
+		__func__, (int)sizeof(query_1_7.partition_support),
+ query_1_7.partition_support);
+
+ f34->v7.partition_table_bytes = f34->v7.partitions * 8 + 2;
+
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.partition_table_bytes,
+ GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.partition_table_bytes;
+ ptable = f34->v7.read_config_buf;
+
+ ret = rmi_f34v7_read_f34v7_partition_table(f34);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read partition table\n",
+ __func__);
+ return ret;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, ptable,
+ &f34->v7.blkcount, &f34->v7.phyaddr);
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_firmware) {
+ dev_err(&f34->fn->dev,
+ "UI firmware size mismatch: %d != %d\n",
+ block_count, f34->v7.blkcount.ui_firmware);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_config) {
+ dev_err(&f34->fn->dev, "UI config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.dp_config) {
+ dev_err(&f34->fn->dev, "Display config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
+ if (block_count != f34->v7.blkcount.guest_code) {
+ dev_err(&f34->fn->dev, "Guest code size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.bl_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.bl_config) {
+ dev_err(&f34->fn->dev, "Bootloader config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_config(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing config...\n");
+
+ switch (f34->v7.config_area) {
+ case v7_UI_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_DP_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_BL_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing guest code...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_all(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing firmware...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.has_display_cfg) {
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
+ ret = rmi_f34v7_erase_guest_code(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_blocks(struct f34_data *f34, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+ u16 index = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ max_transfer = min(f34->v7.payload_length,
+ (u16)(PAGE_SIZE / f34->v7.block_size));
+
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Wait for idle failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ &f34->v7.read_config_buf[index],
+ transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Read block failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ index += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
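+/*
+ * Block writes are chunked: each iteration programs the transfer
+ * length, issues the flash command, writes up to max_transfer blocks
+ * of payload and waits for the controller to return to idle.
+ * max_transfer is capped at payload_length blocks (a device-reported
+ * limit) and, presumably to bound the size of a single transport
+ * transaction, at one page worth of blocks.
+ */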
+static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
+ const void *block_ptr, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ if (f34->v7.payload_length > (PAGE_SIZE / f34->v7.block_size))
+ max_transfer = PAGE_SIZE / f34->v7.block_size;
+ else
+ max_transfer = f34->v7.payload_length;
+
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ block_ptr, transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed writing data (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed wait for idle (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ block_ptr += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
+static int rmi_f34v7_write_config(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.config_data,
+ f34->v7.config_block_count,
+ v7_CMD_WRITE_CONFIG);
+}
+
+static int rmi_f34v7_write_ui_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.ui_config.data;
+ f34->v7.config_size = f34->v7.img.ui_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_dp_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.dp_config.data;
+ f34->v7.config_size = f34->v7.img.dp_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_guest_code(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.guest_code.data,
+ f34->v7.img.guest_code.size /
+ f34->v7.block_size,
+ v7_CMD_WRITE_GUEST_CODE);
+}
+
+static int rmi_f34v7_write_flash_config(struct f34_data *f34)
+{
+ int ret;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.fl_config.data;
+ f34->v7.config_size = f34->v7.img.fl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ if (f34->v7.config_block_count != f34->v7.blkcount.fl_config) {
+ dev_err(&f34->fn->dev, "%s: Flash config size mismatch\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Erase flash config command written\n", __func__);
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_table(struct f34_data *f34)
+{
+ u16 block_count;
+ int ret;
+
+ block_count = f34->v7.blkcount.bl_config;
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_size = f34->v7.block_size * block_count;
+ devm_kfree(&f34->fn->dev, f34->v7.read_config_buf);
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.config_size, GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.config_size;
+
+ ret = rmi_f34v7_read_f34v7_blocks(f34, block_count, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_flash_config(f34);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.read_config_buf;
+ f34->v7.config_size = f34->v7.img.bl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_firmware(struct f34_data *f34)
+{
+ u16 blk_count;
+
+ blk_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.ui_firmware.data,
+ blk_count, v7_CMD_WRITE_FW);
+}
+
+static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
+{
+ if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_guest_code &&
+ f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ f34->v7.new_partition_table = false;
+}
+
+static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
+ const void *image)
+{
+ int i;
+ int num_of_containers;
+ unsigned int addr;
+ unsigned int container_id;
+ unsigned int length;
+ const void *content;
+ const struct container_descriptor *descriptor;
+
+ num_of_containers = f34->v7.img.bootloader.size / 4 - 1;
+
+ for (i = 1; i <= num_of_containers; i++) {
+ addr = get_unaligned_le32(f34->v7.img.bootloader.data + i * 4);
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+ switch (container_id) {
+ case BL_CONFIG_CONTAINER:
+ case GLOBAL_PARAMETERS_CONTAINER:
+ f34->v7.img.bl_config.data = content;
+ f34->v7.img.bl_config.size = length;
+ break;
+ case BL_LOCKDOWN_INFO_CONTAINER:
+ case DEVICE_CONFIG_CONTAINER:
+ f34->v7.img.lockdown.data = content;
+ f34->v7.img.lockdown.size = length;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
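+/*
+ * A header version 0x10 image starts with struct image_header_10 whose
+ * top_level_container_start_addr points at a container descriptor.
+ * That descriptor's content is a list of 4 byte little-endian
+ * addresses, each referring to a further struct container_descriptor
+ * which in turn locates the actual payload (UI firmware, UI config,
+ * bootloader, flash config, ...) via content_address/content_length.
+ */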
+static void rmi_f34v7_parse_image_header_10(struct f34_data *f34)
+{
+ unsigned int i;
+ unsigned int num_of_containers;
+ unsigned int addr;
+ unsigned int offset;
+ unsigned int container_id;
+ unsigned int length;
+ const void *image = f34->v7.image;
+ const u8 *content;
+ const struct container_descriptor *descriptor;
+ const struct image_header_10 *header = image;
+
+ f34->v7.img.checksum = le32_to_cpu(header->checksum);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.img.checksum=%X\n",
+ __func__, f34->v7.img.checksum);
+
+ /* address of top level container */
+ offset = le32_to_cpu(header->top_level_container_start_addr);
+ descriptor = image + offset;
+
+ /* address of top level container content */
+ offset = le32_to_cpu(descriptor->content_address);
+ num_of_containers = le32_to_cpu(descriptor->content_length) / 4;
+
+ for (i = 0; i < num_of_containers; i++) {
+ addr = get_unaligned_le32(image + offset);
+ offset += 4;
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: container_id=%d, length=%d\n", __func__,
+ container_id, length);
+
+ switch (container_id) {
+ case UI_CONTAINER:
+ case CORE_CODE_CONTAINER:
+ f34->v7.img.ui_firmware.data = content;
+ f34->v7.img.ui_firmware.size = length;
+ break;
+ case UI_CONFIG_CONTAINER:
+ case CORE_CONFIG_CONTAINER:
+ f34->v7.img.ui_config.data = content;
+ f34->v7.img.ui_config.size = length;
+ break;
+ case BL_CONTAINER:
+ f34->v7.img.bl_version = *content;
+ f34->v7.img.bootloader.data = content;
+ f34->v7.img.bootloader.size = length;
+ rmi_f34v7_parse_img_header_10_bl_container(f34, image);
+ break;
+ case GUEST_CODE_CONTAINER:
+ f34->v7.img.contains_guest_code = true;
+ f34->v7.img.guest_code.data = content;
+ f34->v7.img.guest_code.size = length;
+ break;
+ case DISPLAY_CONFIG_CONTAINER:
+ f34->v7.img.contains_display_cfg = true;
+ f34->v7.img.dp_config.data = content;
+ f34->v7.img.dp_config.size = length;
+ break;
+ case FLASH_CONFIG_CONTAINER:
+ f34->v7.img.contains_flash_config = true;
+ f34->v7.img.fl_config.data = content;
+ f34->v7.img.fl_config.size = length;
+ break;
+ case GENERAL_INFORMATION_CONTAINER:
+ f34->v7.img.contains_firmware_id = true;
+ f34->v7.img.firmware_id =
+ get_unaligned_le32(content + 4);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_parse_image_info(struct f34_data *f34)
+{
+ const struct image_header_10 *header = f34->v7.image;
+
+ memset(&f34->v7.img, 0x00, sizeof(f34->v7.img));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: header->major_header_version = %d\n",
+ __func__, header->major_header_version);
+
+ switch (header->major_header_version) {
+ case IMAGE_HEADER_VERSION_10:
+ rmi_f34v7_parse_image_header_10(f34);
+ break;
+ default:
+ dev_err(&f34->fn->dev, "Unsupported image file format %02X\n",
+ header->major_header_version);
+ return -EINVAL;
+ }
+
+ if (!f34->v7.img.contains_flash_config) {
+ dev_err(&f34->fn->dev, "%s: No flash config in fw image\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
+ &f34->v7.img.blkcount, &f34->v7.img.phyaddr);
+
+ rmi_f34v7_compare_partition_tables(f34);
+
+ return 0;
+}
+
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret;
+
+ rmi_f34v7_read_queries_bl_version(f34);
+
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (!f34->v7.new_partition_table) {
+ ret = rmi_f34v7_check_ui_firmware_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ ret = rmi_f34v7_check_ui_config_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.img.contains_display_cfg) {
+ ret = rmi_f34v7_check_dp_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ ret = rmi_f34v7_check_guest_code_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ } else {
+ ret = rmi_f34v7_check_bl_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = rmi_f34v7_erase_all(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.new_partition_table) {
+ ret = rmi_f34v7_write_partition_table(f34);
+ if (ret < 0)
+ goto fail;
+ dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
+ __func__);
+ }
+
+ dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
+ f34->v7.img.ui_firmware.size);
+
+ ret = rmi_f34v7_write_firmware(f34);
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
+ f34->v7.img.ui_config.size);
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_write_ui_config(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
+ dev_info(&f34->fn->dev, "Writing display config...\n");
+
+ ret = rmi_f34v7_write_dp_config(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.new_partition_table) {
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ dev_info(&f34->fn->dev, "Writing guest code...\n");
+
+ ret = rmi_f34v7_write_guest_code(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
+
+static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
+{
+ int ret;
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.in_bl_mode)
+ return 0;
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ENABLE_FLASH_PROG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ if (!f34->v7.in_bl_mode) {
+ dev_err(&f34->fn->dev, "%s: BL mode not entered\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret = 0;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (!f34->v7.force_update && f34->v7.new_partition_table) {
+ dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (f34->v7.in_bl_mode) {
+ dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+ __func__);
+ }
+
+	ret = rmi_f34v7_enter_flash_prog(f34);
+	if (ret < 0)
+		goto exit;
+
+	return 0;
+
+exit:
+ return ret;
+}
+
+int rmi_f34v7_probe(struct f34_data *f34)
+{
+ int ret;
+
+ /* Read bootloader version */
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.query_base_addr + V7_BOOTLOADER_ID_OFFSET,
+ f34->bootloader_id,
+ sizeof(f34->bootloader_id));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read bootloader ID\n",
+ __func__);
+ return ret;
+ }
+
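+	/*
+	 * Older bootloaders report the ID as ASCII characters ('5', '6'),
+	 * while V7 appears to report the major revision as a plain number,
+	 * hence the mixed comparison below.
+	 */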
+ if (f34->bootloader_id[1] == '5') {
+ f34->bl_version = 5;
+ } else if (f34->bootloader_id[1] == '6') {
+ f34->bl_version = 6;
+ } else if (f34->bootloader_id[1] == 7) {
+ f34->bl_version = 7;
+ } else {
+ dev_err(&f34->fn->dev, "%s: Unrecognized bootloader version\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(&f34->v7.blkcount, 0x00, sizeof(f34->v7.blkcount));
+ memset(&f34->v7.phyaddr, 0x00, sizeof(f34->v7.phyaddr));
+ rmi_f34v7_read_queries(f34);
+
+ f34->v7.force_update = false;
+ return 0;
+}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index cf805b960866..dea63e2db3e6 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -200,7 +200,7 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
if (error < 0)
- return error;
+ goto unlock;
init_completion(&f54->cmd_done);
@@ -209,15 +209,18 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
queue_delayed_work(f54->workqueue, &f54->work, 0);
+unlock:
mutex_unlock(&f54->data_mutex);
- return 0;
+ return error;
}
static size_t rmi_f54_get_report_size(struct f54_data *f54)
{
- u8 rx = f54->num_rx_electrodes ? : f54->num_rx_electrodes;
- u8 tx = f54->num_tx_electrodes ? : f54->num_tx_electrodes;
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
size_t size;
switch (rmi_f54_get_reptype(f54, f54->input)) {
@@ -401,6 +404,10 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
{
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
struct v4l2_pix_format *f = &f54->format;
enum rmi_f54_report_type reptype;
int ret;
@@ -415,8 +422,8 @@ static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
f54->input = i;
- f->width = f54->num_rx_electrodes;
- f->height = f54->num_tx_electrodes;
+ f->width = rx;
+ f->height = tx;
f->field = V4L2_FIELD_NONE;
f->colorspace = V4L2_COLORSPACE_RAW;
f->bytesperline = f->width * sizeof(u16);
diff --git a/drivers/input/rmi4/rmi_f55.c b/drivers/input/rmi4/rmi_f55.c
new file mode 100644
index 000000000000..37390ca6a924
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f55.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2015 Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define F55_NAME "rmi4_f55"
+
+/* F55 data offsets */
+#define F55_NUM_RX_OFFSET 0
+#define F55_NUM_TX_OFFSET 1
+#define F55_PHYS_CHAR_OFFSET 2
+
+/* Only read required query registers */
+#define F55_QUERY_LEN 3
+
+/* F55 capabilities */
+#define F55_CAP_SENSOR_ASSIGN BIT(0)
+
+struct f55_data {
+ struct rmi_function *fn;
+
+ u8 qry[F55_QUERY_LEN];
+ u8 num_rx_electrodes;
+ u8 cfg_num_rx_electrodes;
+ u8 num_tx_electrodes;
+ u8 cfg_num_tx_electrodes;
+};
+
+static int rmi_f55_detect(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f55_data *f55;
+ int error;
+
+ f55 = dev_get_drvdata(&fn->dev);
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ &f55->qry, sizeof(f55->qry));
+ if (error) {
+ dev_err(&fn->dev, "%s: Failed to query F55 properties\n",
+ __func__);
+ return error;
+ }
+
+ f55->num_rx_electrodes = f55->qry[F55_NUM_RX_OFFSET];
+ f55->num_tx_electrodes = f55->qry[F55_NUM_TX_OFFSET];
+
+ f55->cfg_num_rx_electrodes = f55->num_rx_electrodes;
+	f55->cfg_num_tx_electrodes = f55->num_tx_electrodes;
+
+ drv_data->num_rx_electrodes = f55->cfg_num_rx_electrodes;
+	drv_data->num_tx_electrodes = f55->cfg_num_tx_electrodes;
+
+ if (f55->qry[F55_PHYS_CHAR_OFFSET] & F55_CAP_SENSOR_ASSIGN) {
+ int i, total;
+ u8 buf[256];
+
+ /*
+ * Calculate the number of enabled receive and transmit
+ * electrodes by reading F55:Ctrl1 (sensor receiver assignment)
+ * and F55:Ctrl2 (sensor transmitter assignment). The number of
+ * enabled electrodes is the sum of all field entries with a
+ * value other than 0xff.
+ */
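+		/*
+		 * For example, a receiver assignment of
+		 * { 0, 1, 2, 0xff, 0xff, ... } means three enabled receive
+		 * electrodes.
+		 */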
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.control_base_addr + 1,
+ buf, f55->num_rx_electrodes);
+ if (!error) {
+ total = 0;
+ for (i = 0; i < f55->num_rx_electrodes; i++) {
+ if (buf[i] != 0xff)
+ total++;
+ }
+ f55->cfg_num_rx_electrodes = total;
+ drv_data->num_rx_electrodes = total;
+ }
+
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.control_base_addr + 2,
+ buf, f55->num_tx_electrodes);
+ if (!error) {
+ total = 0;
+ for (i = 0; i < f55->num_tx_electrodes; i++) {
+ if (buf[i] != 0xff)
+ total++;
+ }
+ f55->cfg_num_tx_electrodes = total;
+ drv_data->num_tx_electrodes = total;
+ }
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_rx_electrodes: %d (raw %d)\n",
+ f55->cfg_num_rx_electrodes, f55->num_rx_electrodes);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_tx_electrodes: %d (raw %d)\n",
+ f55->cfg_num_tx_electrodes, f55->num_tx_electrodes);
+
+ return 0;
+}
+
+static int rmi_f55_probe(struct rmi_function *fn)
+{
+ struct f55_data *f55;
+
+ f55 = devm_kzalloc(&fn->dev, sizeof(struct f55_data), GFP_KERNEL);
+ if (!f55)
+ return -ENOMEM;
+
+ f55->fn = fn;
+ dev_set_drvdata(&fn->dev, f55);
+
+ return rmi_f55_detect(fn);
+}
+
+struct rmi_function_handler rmi_f55_handler = {
+ .driver = {
+ .name = F55_NAME,
+ },
+ .func = 0x55,
+ .probe = rmi_f55_probe,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 1ebc2c1debae..082306d7c207 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -9,7 +9,6 @@
#include <linux/i2c.h>
#include <linux/rmi.h>
-#include <linux/irq.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -35,8 +34,6 @@ struct rmi_i2c_xport {
struct mutex page_mutex;
int page;
- int irq;
-
u8 *tx_buf;
size_t tx_buf_size;
@@ -177,42 +174,6 @@ static const struct rmi_transport_ops rmi_i2c_ops = {
.read_block = rmi_i2c_read_block,
};
-static irqreturn_t rmi_i2c_irq(int irq, void *dev_id)
-{
- struct rmi_i2c_xport *rmi_i2c = dev_id;
- struct rmi_device *rmi_dev = rmi_i2c->xport.rmi_dev;
- int ret;
-
- ret = rmi_process_interrupt_requests(rmi_dev);
- if (ret)
- rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
- "Failed to process interrupt request: %d\n", ret);
-
- return IRQ_HANDLED;
-}
-
-static int rmi_i2c_init_irq(struct i2c_client *client)
-{
- struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
- int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_i2c->irq));
- int ret;
-
- if (!irq_flags)
- irq_flags = IRQF_TRIGGER_LOW;
-
- ret = devm_request_threaded_irq(&client->dev, rmi_i2c->irq, NULL,
- rmi_i2c_irq, irq_flags | IRQF_ONESHOT, client->name,
- rmi_i2c);
- if (ret < 0) {
- dev_warn(&client->dev, "Failed to register interrupt %d\n",
- rmi_i2c->irq);
-
- return ret;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id rmi_i2c_of_match[] = {
{ .compatible = "syna,rmi4-i2c" },
@@ -255,8 +216,7 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (!client->dev.of_node && client_pdata)
*pdata = *client_pdata;
- if (client->irq > 0)
- rmi_i2c->irq = client->irq;
+ pdata->irq = client->irq;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
dev_name(&client->dev));
@@ -321,10 +281,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (retval)
return retval;
- retval = rmi_i2c_init_irq(client);
- if (retval < 0)
- return retval;
-
dev_info(&client->dev, "registered rmi i2c driver at %#04x.\n",
client->addr);
return 0;
@@ -337,18 +293,10 @@ static int rmi_i2c_suspend(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
- ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_i2c->irq);
- if (device_may_wakeup(&client->dev)) {
- ret = enable_irq_wake(rmi_i2c->irq);
- if (!ret)
- dev_warn(dev, "Failed to enable irq for wake: %d\n",
- ret);
- }
-
regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
rmi_i2c->supplies);
@@ -368,15 +316,7 @@ static int rmi_i2c_resume(struct device *dev)
msleep(rmi_i2c->startup_delay);
- enable_irq(rmi_i2c->irq);
- if (device_may_wakeup(&client->dev)) {
- ret = disable_irq_wake(rmi_i2c->irq);
- if (!ret)
- dev_warn(dev, "Failed to disable irq for wake: %d\n",
- ret);
- }
-
- ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
@@ -391,12 +331,10 @@ static int rmi_i2c_runtime_suspend(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
- ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_i2c->irq);
-
regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
rmi_i2c->supplies);
@@ -416,9 +354,7 @@ static int rmi_i2c_runtime_resume(struct device *dev)
msleep(rmi_i2c->startup_delay);
- enable_irq(rmi_i2c->irq);
-
- ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
new file mode 100644
index 000000000000..76752555d809
--- /dev/null
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2015 - 2016 Red Hat, Inc
+ * Copyright (c) 2011, 2012 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kconfig.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define SMB_PROTOCOL_VERSION_ADDRESS 0xfd
+#define SMB_MAX_COUNT 32
+#define RMI_SMB2_MAP_SIZE 8 /* 8 entries of 4 bytes each */
+#define RMI_SMB2_MAP_FLAGS_WE 0x01
+
+struct mapping_table_entry {
+ __le16 rmiaddr;
+ u8 readcount;
+ u8 flags;
+};
+
+struct rmi_smb_xport {
+ struct rmi_transport_dev xport;
+ struct i2c_client *client;
+
+ struct mutex page_mutex;
+ int page;
+ u8 table_index;
+ struct mutex mappingtable_mutex;
+ struct mapping_table_entry mapping_table[RMI_SMB2_MAP_SIZE];
+};
+
+static int rmi_smb_get_version(struct rmi_smb_xport *rmi_smb)
+{
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ /* Check for the new SMBus version of the device by reading the version byte. */
+ retval = i2c_smbus_read_byte_data(client, SMB_PROTOCOL_VERSION_ADDRESS);
+ if (retval < 0) {
+ dev_err(&client->dev, "failed to get SMBus version number!\n");
+ return retval;
+ }
+ return retval + 1;
+}
+
+/* SMB block write - wrapper over i2c_smbus_write_block_data */
+static int smb_block_write(struct rmi_transport_dev *xport,
+ u8 commandcode, const void *buf, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ retval = i2c_smbus_write_block_data(client, commandcode, len, buf);
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "wrote %zd bytes at %#04x: %d (%*ph)\n",
+ len, commandcode, retval, (int)len, buf);
+
+ return retval;
+}
+
+/*
+ * Return the command code to use for an SMBus operation on the given
+ * RMI address, keeping a record of it in the driver's mapping table.
+ */
+static int rmi_smb_get_command_code(struct rmi_transport_dev *xport,
+ u16 rmiaddr, int bytecount, bool isread, u8 *commandcode)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int i;
+ int retval;
+ struct mapping_table_entry mapping_data[1];
+
+ mutex_lock(&rmi_smb->mappingtable_mutex);
+ for (i = 0; i < RMI_SMB2_MAP_SIZE; i++) {
+ if (rmi_smb->mapping_table[i].rmiaddr == rmiaddr) {
+ if (isread) {
+ if (rmi_smb->mapping_table[i].readcount
+ == bytecount) {
+ *commandcode = i;
+ retval = 0;
+ goto exit;
+ }
+ } else {
+ if (rmi_smb->mapping_table[i].flags &
+ RMI_SMB2_MAP_FLAGS_WE) {
+ *commandcode = i;
+ retval = 0;
+ goto exit;
+ }
+ }
+ }
+ }
+ i = rmi_smb->table_index;
+ rmi_smb->table_index = (i + 1) % RMI_SMB2_MAP_SIZE;
+
+ /* construct a mapping table entry (4 bytes per entry) */
+ memset(mapping_data, 0, sizeof(mapping_data));
+
+ mapping_data[0].rmiaddr = cpu_to_le16(rmiaddr);
+ mapping_data[0].readcount = bytecount;
+ mapping_data[0].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+
+ retval = smb_block_write(xport, i + 0x80, mapping_data,
+ sizeof(mapping_data));
+
+ if (retval < 0) {
+ /*
+ * If the write to the device mapping table failed,
+ * clear the corresponding driver mapping table record.
+ */
+ rmi_smb->mapping_table[i].rmiaddr = 0x0000;
+ rmi_smb->mapping_table[i].readcount = 0;
+ rmi_smb->mapping_table[i].flags = 0;
+ goto exit;
+ }
+ /* save to the driver level mapping table */
+ rmi_smb->mapping_table[i].rmiaddr = rmiaddr;
+ rmi_smb->mapping_table[i].readcount = bytecount;
+ rmi_smb->mapping_table[i].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+ *commandcode = i;
+
+exit:
+ mutex_unlock(&rmi_smb->mappingtable_mutex);
+
+ return retval;
+}
+
+static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ const void *databuff, size_t len)
+{
+ int retval = 0;
+ u8 commandcode;
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int cur_len = (int)len;
+
+ mutex_lock(&rmi_smb->page_mutex);
+
+ while (cur_len > 0) {
+ /*
+ * Break the write into 32-byte chunks and get a command code for each.
+ */
+ int block_len = min_t(int, cur_len, SMB_MAX_COUNT);
+
+ retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+ false, &commandcode);
+ if (retval < 0)
+ goto exit;
+
+ retval = smb_block_write(xport, commandcode,
+ databuff, block_len);
+ if (retval < 0)
+ goto exit;
+
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
+ }
+exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+ return retval;
+}
+
+/* SMB block read - wrapper over i2c_smbus_read_block_data */
+static int smb_block_read(struct rmi_transport_dev *xport,
+ u8 commandcode, void *buf, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ struct i2c_client *client = rmi_smb->client;
+ int retval;
+
+ retval = i2c_smbus_read_block_data(client, commandcode, buf);
+ if (retval < 0)
+ return retval;
+
+ return retval;
+}
+
+static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ void *databuff, size_t len)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+ int retval;
+ u8 commandcode;
+ int cur_len = (int)len;
+
+ mutex_lock(&rmi_smb->page_mutex);
+ memset(databuff, 0, len);
+
+ while (cur_len > 0) {
+ /* break the read into 32-byte chunks and get a command code for each */
+ int block_len = min_t(int, cur_len, SMB_MAX_COUNT);
+
+ retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+ true, &commandcode);
+ if (retval < 0)
+ goto exit;
+
+ retval = smb_block_read(xport, commandcode,
+ databuff, block_len);
+ if (retval < 0)
+ goto exit;
+
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
+
+exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+ return retval;
+}
+
+static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
+{
+ /* the device's mapping table has been flushed; discard our cached copy */
+ mutex_lock(&rmi_smb->mappingtable_mutex);
+ memset(rmi_smb->mapping_table, 0, sizeof(rmi_smb->mapping_table));
+ mutex_unlock(&rmi_smb->mappingtable_mutex);
+}
+
+static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
+{
+ int retval;
+
+ /* we need to get the smbus version to activate the touchpad */
+ retval = rmi_smb_get_version(rmi_smb);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
+{
+ struct rmi_smb_xport *rmi_smb =
+ container_of(xport, struct rmi_smb_xport, xport);
+
+ rmi_smb_clear_state(rmi_smb);
+
+ /*
+ * We do not send the actual reset command; it has to be handled by
+ * PS/2 or there will be races between PS/2 and SMBus.
+ * PS/2 should ensure that a psmouse_reset is called before
+ * initializing the device and after it has been removed, so that the
+ * device is left in a known state.
+ */
+ return rmi_smb_enable_smbus_mode(rmi_smb);
+}
+
+static const struct rmi_transport_ops rmi_smb_ops = {
+ .write_block = rmi_smb_write_block,
+ .read_block = rmi_smb_read_block,
+ .reset = rmi_smb_reset,
+};
+
+static int rmi_smb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct rmi_smb_xport *rmi_smb;
+ int retval;
+ int smbus_version;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_HOST_NOTIFY)) {
+ dev_err(&client->dev,
+ "adapter does not support required functionality.\n");
+ return -ENODEV;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "no IRQ provided, giving up.\n");
+ return client->irq ? client->irq : -ENODEV;
+ }
+
+ rmi_smb = devm_kzalloc(&client->dev, sizeof(struct rmi_smb_xport),
+ GFP_KERNEL);
+ if (!rmi_smb)
+ return -ENOMEM;
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data, aborting\n");
+ return -ENOMEM;
+ }
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
+ dev_name(&client->dev));
+
+ rmi_smb->client = client;
+ mutex_init(&rmi_smb->page_mutex);
+ mutex_init(&rmi_smb->mappingtable_mutex);
+
+ rmi_smb->xport.dev = &client->dev;
+ rmi_smb->xport.pdata = *pdata;
+ rmi_smb->xport.pdata.irq = client->irq;
+ rmi_smb->xport.proto_name = "smb2";
+ rmi_smb->xport.ops = &rmi_smb_ops;
+
+ retval = rmi_smb_get_version(rmi_smb);
+ if (retval < 0)
+ return retval;
+
+ smbus_version = retval;
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "SMBus version is %d\n",
+ smbus_version);
+
+ if (smbus_version != 2) {
+ dev_err(&client->dev, "Unrecognized SMB version %d.\n",
+ smbus_version);
+ return -ENODEV;
+ }
+
+ i2c_set_clientdata(client, rmi_smb);
+
+ retval = rmi_register_transport_device(&rmi_smb->xport);
+ if (retval) {
+ dev_err(&client->dev, "Failed to register transport driver at 0x%.2X.\n",
+ client->addr);
+ i2c_set_clientdata(client, NULL);
+ return retval;
+ }
+
+ dev_info(&client->dev, "registered rmi smb driver at %#04x.\n",
+ client->addr);
+ return 0;
+
+}
+
+static int rmi_smb_remove(struct i2c_client *client)
+{
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+
+ rmi_unregister_transport_device(&rmi_smb->xport);
+
+ return 0;
+}
+
+static int __maybe_unused rmi_smb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, true);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused rmi_smb_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, false);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused rmi_smb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ struct rmi_device *rmi_dev = rmi_smb->xport.rmi_dev;
+ int ret;
+
+ rmi_smb_reset(&rmi_smb->xport, 0);
+
+ rmi_reset(rmi_dev);
+
+ ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, true);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+
+static int __maybe_unused rmi_smb_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, false);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rmi_smb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_smb_suspend, rmi_smb_resume)
+ SET_RUNTIME_PM_OPS(rmi_smb_runtime_suspend, rmi_smb_runtime_resume,
+ NULL)
+};
+
+static const struct i2c_device_id rmi_id[] = {
+ { "rmi4_smbus", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rmi_id);
+
+static struct i2c_driver rmi_smb_driver = {
+ .driver = {
+ .name = "rmi4_smbus",
+ .pm = &rmi_smb_pm,
+ },
+ .id_table = rmi_id,
+ .probe = rmi_smb_probe,
+ .remove = rmi_smb_remove,
+};
+
+module_i2c_driver(rmi_smb_driver);
+
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("RMI4 SMBus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 4ebef607e214..69548d7d1f10 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -12,7 +12,6 @@
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/irq.h>
#include <linux/of.h>
#include "rmi_driver.h"
@@ -44,8 +43,6 @@ struct rmi_spi_xport {
struct mutex page_mutex;
int page;
- int irq;
-
u8 *rx_buf;
u8 *tx_buf;
int xfer_buf_size;
@@ -326,41 +323,6 @@ static const struct rmi_transport_ops rmi_spi_ops = {
.read_block = rmi_spi_read_block,
};
-static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
-{
- struct rmi_spi_xport *rmi_spi = dev_id;
- struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
- int ret;
-
- ret = rmi_process_interrupt_requests(rmi_dev);
- if (ret)
- rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
- "Failed to process interrupt request: %d\n", ret);
-
- return IRQ_HANDLED;
-}
-
-static int rmi_spi_init_irq(struct spi_device *spi)
-{
- struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
- int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
- int ret;
-
- if (!irq_flags)
- irq_flags = IRQF_TRIGGER_LOW;
-
- ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
- rmi_spi_irq, irq_flags | IRQF_ONESHOT,
- dev_name(&spi->dev), rmi_spi);
- if (ret < 0) {
- dev_warn(&spi->dev, "Failed to register interrupt %d\n",
- rmi_spi->irq);
- return ret;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
struct rmi_device_platform_data *pdata)
@@ -440,8 +402,7 @@ static int rmi_spi_probe(struct spi_device *spi)
return retval;
}
- if (spi->irq > 0)
- rmi_spi->irq = spi->irq;
+ pdata->irq = spi->irq;
rmi_spi->spi = spi;
mutex_init(&rmi_spi->page_mutex);
@@ -477,10 +438,6 @@ static int rmi_spi_probe(struct spi_device *spi)
if (retval)
return retval;
- retval = rmi_spi_init_irq(spi);
- if (retval < 0)
- return retval;
-
dev_info(&spi->dev, "registered RMI SPI driver\n");
return 0;
}
@@ -492,17 +449,10 @@ static int rmi_spi_suspend(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_spi->irq);
- if (device_may_wakeup(&spi->dev)) {
- ret = enable_irq_wake(rmi_spi->irq);
- if (!ret)
- dev_warn(dev, "Failed to enable irq for wake: %d\n",
- ret);
- }
return ret;
}
@@ -512,15 +462,7 @@ static int rmi_spi_resume(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- enable_irq(rmi_spi->irq);
- if (device_may_wakeup(&spi->dev)) {
- ret = disable_irq_wake(rmi_spi->irq);
- if (!ret)
- dev_warn(dev, "Failed to disable irq for wake: %d\n",
- ret);
- }
-
- ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
@@ -535,12 +477,10 @@ static int rmi_spi_runtime_suspend(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
- disable_irq(rmi_spi->irq);
-
return 0;
}
@@ -550,9 +490,7 @@ static int rmi_spi_runtime_resume(struct device *dev)
struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
int ret;
- enable_irq(rmi_spi->irq);
-
- ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
if (ret)
dev_warn(dev, "Failed to resume device: %d\n", ret);
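
Both RMI transport hunks above stop requesting their own interrupt and instead forward the interrupt line to the core through pdata->irq. The fragment below is a hedged sketch of the resulting split, assuming the core requests a threaded IRQ much like the deleted transport code did; rmi_core_irq() and rmi_core_request_irq() are illustrative names, not the actual core implementation.

static irqreturn_t rmi_core_irq(int irq, void *dev_id)
{
        struct rmi_device *rmi_dev = dev_id;

        rmi_process_interrupt_requests(rmi_dev);
        return IRQ_HANDLED;
}

/* Called once by the core after the transport has registered (sketch). */
static int rmi_core_request_irq(struct rmi_device *rmi_dev, int irq)
{
        unsigned long irq_flags = irq_get_trigger_type(irq);

        if (!irq_flags)
                irq_flags = IRQF_TRIGGER_LOW;   /* same fallback as before */

        return devm_request_threaded_irq(&rmi_dev->dev, irq, NULL,
                                         rmi_core_irq,
                                         irq_flags | IRQF_ONESHOT,
                                         dev_name(&rmi_dev->dev), rmi_dev);
}

The new bool argument to rmi_driver_suspend()/rmi_driver_resume() (true for system sleep, false for runtime PM in the hunks above) presumably lets the core decide whether to keep the interrupt armed as a wakeup source, which the transports used to handle themselves.
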
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 852858e5d8d0..559c99ca6592 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -79,7 +79,7 @@
# define sdc_readb(p) gsc_readb(p)
# define sdc_writeb(v,p) gsc_writeb((v),(p))
#elif defined(__mc68000__)
-# include <asm/uaccess.h>
+#include <linux/uaccess.h>
# define sdc_readb(p) in_8(p)
# define sdc_writeb(v,p) out_8((p),(v))
#else
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 073246c7d163..a7618776705a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
+ },
{ }
};
@@ -517,79 +523,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
},
{ }
@@ -1055,7 +989,11 @@ static int __init i8042_pnp_init(void)
#if defined(__ia64__)
return -ENODEV;
#else
- pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
+ pr_info("PNP: No PS/2 controller found.\n");
+ if (x86_platform.legacy.i8042 !=
+ X86_LEGACY_I8042_EXPECTED_PRESENT)
+ return -ENODEV;
+ pr_info("Probing ports directly.\n");
return 0;
#endif
}
@@ -1131,10 +1069,10 @@ static int __init i8042_pnp_init(void)
return 0;
}
-#else
+#else /* !CONFIG_PNP */
static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
-#endif
+#endif /* CONFIG_PNP */
static int __init i8042_platform_init(void)
{
@@ -1142,8 +1080,8 @@ static int __init i8042_platform_init(void)
#ifdef CONFIG_X86
u8 a20_on = 0xdf;
- /* Just return if pre-detection shows no i8042 controller exist */
- if (!x86_platform.i8042_detect())
+ /* Just return if platform does not have i8042 controller */
+ if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
return -ENODEV;
#endif
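
Two related gates are added above: i8042_platform_init() bails out when the platform firmware marks the i8042 as absent, and the PNP fallback only probes the legacy ports directly when an i8042 is still expected despite the missing PNP node. Condensed, the resulting decision reads roughly as follows (a sketch of the combined logic, not a verbatim copy):

#ifdef CONFIG_X86
        /* Firmware says there is no controller at all: give up early. */
        if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
                return -ENODEV;
#endif

        /* PNP enumeration found nothing. */
        pr_info("PNP: No PS/2 controller found.\n");
        if (x86_platform.legacy.i8042 != X86_LEGACY_I8042_EXPECTED_PRESENT)
                return -ENODEV;         /* do not blind-probe modern systems */
        pr_info("Probing ports directly.\n");
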
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb539ac..62685a768913 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -387,7 +387,7 @@ static int i8042_aux_write(struct serio *serio, unsigned char c)
/*
- * i8042_aux_close attempts to clear AUX or KBD port state by disabling
+ * i8042_port_close attempts to clear AUX or KBD port state by disabling
* and then re-enabling it.
*/
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 5a9d521510bf..d0fccc8ec259 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/q40_master.h>
#include <asm/irq.h>
#include <asm/q40ints.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index d189843f3727..f8ead9f9c77e 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -13,7 +13,7 @@
* the Free Software Foundation.
*/
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 4613f0aefd08..d67547bded3e 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -75,7 +75,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
/*
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index abf09ac42ce4..b796e891e2ee 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -56,7 +56,7 @@ Scott Hill shill@gtcocalcomp.com
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 02aec284deca..3e6003d32e56 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
- if (report_count > 3) {
+ if (report_count == 0 || report_count > 3) {
dev_err(&client->dev,
- "too large report count: %*ph\n",
+ "bad report count: %*ph\n",
HEADER_SIZE, ts->buf);
break;
}
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index fe9877a6af9e..d50ee490c9cc 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -55,6 +55,7 @@ static const struct of_device_id mx25_tcq_ids[] = {
{ .compatible = "fsl,imx25-tcq", },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mx25_tcq_ids);
#define TSC_4WIRE_PRE_INDEX 0
#define TSC_4WIRE_X_INDEX 1
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 8275267eac25..7098e0a47019 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -21,17 +21,25 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/log2.h>
/* ADC configuration registers field define */
#define ADC_AIEN (0x1 << 7)
#define ADC_CONV_DISABLE 0x1F
+#define ADC_AVGE (0x1 << 5)
#define ADC_CAL (0x1 << 7)
#define ADC_CALF 0x2
#define ADC_12BIT_MODE (0x2 << 2)
+#define ADC_CONV_MODE_MASK (0x3 << 2)
#define ADC_IPG_CLK 0x00
+#define ADC_INPUT_CLK_MASK 0x3
#define ADC_CLK_DIV_8 (0x03 << 5)
+#define ADC_CLK_DIV_MASK (0x3 << 5)
#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
+#define ADC_SAMPLE_MODE_MASK (0x1 << 4)
#define ADC_HARDWARE_TRIGGER (0x1 << 13)
+#define ADC_AVGS_SHIFT 14
+#define ADC_AVGS_MASK (0x3 << 14)
#define SELECT_CHANNEL_4 0x04
#define SELECT_CHANNEL_1 0x01
#define DISABLE_CONVERSION_INT (0x0 << 7)
@@ -84,8 +92,10 @@ struct imx6ul_tsc {
struct clk *adc_clk;
struct gpio_desc *xnur_gpio;
- int measure_delay_time;
- int pre_charge_time;
+ u32 measure_delay_time;
+ u32 pre_charge_time;
+ bool average_enable;
+ u32 average_select;
struct completion completion;
};
@@ -96,17 +106,23 @@ struct imx6ul_tsc {
*/
static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
{
- int adc_hc = 0;
- int adc_gc;
- int adc_gs;
- int adc_cfg;
- int timeout;
+ u32 adc_hc = 0;
+ u32 adc_gc;
+ u32 adc_gs;
+ u32 adc_cfg;
+ unsigned long timeout;
reinit_completion(&tsc->completion);
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+ adc_cfg &= ~(ADC_CONV_MODE_MASK | ADC_INPUT_CLK_MASK);
adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
+ adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE_MASK);
adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+ if (tsc->average_enable) {
+ adc_cfg &= ~ADC_AVGS_MASK;
+ adc_cfg |= (tsc->average_select) << ADC_AVGS_SHIFT;
+ }
adc_cfg &= ~ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
@@ -118,6 +134,8 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
/* start ADC calibration */
adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
adc_gc |= ADC_CAL;
+ if (tsc->average_enable)
+ adc_gc |= ADC_AVGE;
writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
timeout = wait_for_completion_timeout
@@ -148,7 +166,7 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
*/
static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
{
- int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
+ u32 adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
adc_hc0 = DISABLE_CONVERSION_INT;
writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
@@ -173,8 +191,8 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
*/
static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
{
- int basic_setting = 0;
- int start;
+ u32 basic_setting = 0;
+ u32 start;
basic_setting |= tsc->measure_delay_time << 8;
basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
@@ -209,8 +227,8 @@ static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
{
- int tsc_flow;
- int adc_cfg;
+ u32 tsc_flow;
+ u32 adc_cfg;
/* TSC controller enters to idle status */
tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
@@ -227,8 +245,8 @@ static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
{
unsigned long timeout = jiffies + msecs_to_jiffies(2);
- int state_machine;
- int debug_mode2;
+ u32 state_machine;
+ u32 debug_mode2;
do {
if (time_after(jiffies, timeout))
@@ -246,10 +264,10 @@ static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
{
struct imx6ul_tsc *tsc = dev_id;
- int status;
- int value;
- int x, y;
- int start;
+ u32 status;
+ u32 value;
+ u32 x, y;
+ u32 start;
status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
@@ -289,8 +307,8 @@ static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
static irqreturn_t adc_irq_fn(int irq, void *dev_id)
{
struct imx6ul_tsc *tsc = dev_id;
- int coco;
- int value;
+ u32 coco;
+ u32 value;
coco = readl(tsc->adc_regs + REG_ADC_HS);
if (coco & 0x01) {
@@ -346,6 +364,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int err;
int tsc_irq;
int adc_irq;
+ u32 average_samples;
tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
@@ -450,6 +469,30 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
if (err)
tsc->pre_charge_time = 0xfff;
+ err = of_property_read_u32(np, "touchscreen-average-samples",
+ &average_samples);
+ if (err)
+ average_samples = 1;
+
+ switch (average_samples) {
+ case 1:
+ tsc->average_enable = false;
+ tsc->average_select = 0; /* value unused; initialize anyway */
+ break;
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ tsc->average_enable = true;
+ tsc->average_select = ilog2(average_samples) - 2;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "touchscreen-average-samples (%u) must be 1, 4, 8, 16 or 32\n",
+ average_samples);
+ return -EINVAL;
+ }
+
err = input_register_device(tsc->input);
if (err) {
dev_err(&pdev->dev,
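
The touchscreen-average-samples handling above maps 4/8/16/32 samples onto the two-bit AVGS field with ilog2(average_samples) - 2. A tiny userspace check of that arithmetic (ilog2_u32() is a local stand-in for the kernel's ilog2()):

#include <assert.h>

static unsigned int ilog2_u32(unsigned int v)   /* floor(log2(v)), v > 0 */
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        assert(ilog2_u32(4)  - 2 == 0);   /* AVGS = 0 -> 4 samples  */
        assert(ilog2_u32(8)  - 2 == 1);   /* AVGS = 1 -> 8 samples  */
        assert(ilog2_u32(16) - 2 == 2);   /* AVGS = 2 -> 16 samples */
        assert(ilog2_u32(32) - 2 == 3);   /* AVGS = 3 -> 32 samples */
        return 0;
}
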
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 552a3773f79d..703d7f983d0a 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -33,7 +33,7 @@
/*****************************************************************
* Protocol
- * Version : MIP 4.0 Rev 4.6
+ * Version : MIP 4.0 Rev 5.4
*****************************************************************/
/* Address */
@@ -81,6 +81,9 @@
#define MIP4_R1_INFO_IC_HW_CATEGORY 0x77
#define MIP4_R1_INFO_CONTACT_THD_SCR 0x78
#define MIP4_R1_INFO_CONTACT_THD_KEY 0x7A
+#define MIP4_R1_INFO_PID 0x7C
+#define MIP4_R1_INFO_VID 0x7E
+#define MIP4_R1_INFO_SLAVE_ADDR 0x80
#define MIP4_R0_EVENT 0x02
#define MIP4_R1_EVENT_SUPPORTED_FUNC 0x00
@@ -157,7 +160,9 @@ struct mip4_ts {
char phys[32];
char product_name[16];
+ u16 product_id;
char ic_name[4];
+ char fw_name[32];
unsigned int max_x;
unsigned int max_y;
@@ -264,6 +269,23 @@ static int mip4_query_device(struct mip4_ts *ts)
dev_dbg(&ts->client->dev, "product name: %.*s\n",
(int)sizeof(ts->product_name), ts->product_name);
+ /* Product ID */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_PID;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 2);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve product id: %d\n", error);
+ } else {
+ ts->product_id = get_unaligned_le16(&buf[0]);
+ dev_dbg(&ts->client->dev, "product id: %04X\n", ts->product_id);
+ }
+
+ /* Firmware name */
+ snprintf(ts->fw_name, sizeof(ts->fw_name),
+ "melfas_mip4_%04X.fw", ts->product_id);
+ dev_dbg(&ts->client->dev, "firmware name: %s\n", ts->fw_name);
+
/* IC name */
cmd[0] = MIP4_R0_INFO;
cmd[1] = MIP4_R1_INFO_IC_NAME;
@@ -1269,11 +1291,11 @@ static ssize_t mip4_sysfs_fw_update(struct device *dev,
const struct firmware *fw;
int error;
- error = request_firmware(&fw, MIP4_FW_NAME, dev);
+ error = request_firmware(&fw, ts->fw_name, dev);
if (error) {
dev_err(&ts->client->dev,
"Failed to retrieve firmware %s: %d\n",
- MIP4_FW_NAME, error);
+ ts->fw_name, error);
return error;
}
@@ -1348,6 +1370,25 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+static ssize_t mip4_sysfs_read_product_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%04X\n", ts->product_id);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, mip4_sysfs_read_product_id, NULL);
+
static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1371,6 +1412,7 @@ static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
static struct attribute *mip4_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_hw_version.attr,
+ &dev_attr_product_id.attr,
&dev_attr_ic_name.attr,
&dev_attr_update_fw.attr,
NULL,
@@ -1435,6 +1477,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
input->id.bustype = BUS_I2C;
input->id.vendor = 0x13c5;
+ input->id.product = ts->product_id;
input->open = mip4_input_open;
input->close = mip4_input_close;
@@ -1572,6 +1615,6 @@ static struct i2c_driver mip4_driver = {
module_i2c_driver(mip4_driver);
MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.09.28");
+MODULE_VERSION("2016.10.31");
MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
MODULE_LICENSE("GPL");
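
The MIP4 update derives the firmware file name from the product ID read out of the information registers instead of using a fixed MIP4_FW_NAME. A short sketch of the derivation with made-up register bytes (0x34, 0x12):

        u8 buf[2] = { 0x34, 0x12 };                /* PID register, little-endian */
        u16 product_id = get_unaligned_le16(buf);  /* 0x1234 */
        char fw_name[32];

        snprintf(fw_name, sizeof(fw_name), "melfas_mip4_%04X.fw", product_id);
        /* fw_name is now "melfas_mip4_1234.fw"; it is later handed to
         * request_firmware() in place of the old fixed MIP4_FW_NAME. */
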
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index a99fb5cac5a0..2658afa016c9 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -669,7 +669,7 @@ static int raydium_i2c_do_update_firmware(struct raydium_data *ts,
if (ts->boot_mode == RAYDIUM_TS_MAIN) {
dev_err(&client->dev,
- "failied to jump to boot loader: %d\n",
+ "failed to jump to boot loader: %d\n",
error);
return -EIO;
}
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index f502c8488be8..404830a4a366 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -29,6 +29,7 @@
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
#include <linux/irq.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
@@ -73,6 +74,7 @@ struct silead_ts_data {
struct i2c_client *client;
struct gpio_desc *gpio_power;
struct input_dev *input;
+ struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
u32 max_fingers;
@@ -433,6 +435,13 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
}
#endif
+static void silead_disable_regulator(void *arg)
+{
+ struct silead_ts_data *data = arg;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
static int silead_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -465,6 +474,26 @@ static int silead_ts_probe(struct i2c_client *client,
if (client->irq <= 0)
return -ENODEV;
+ data->regulators[0].supply = "vddio";
+ data->regulators[1].supply = "avdd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (error)
+ return error;
+
+ /*
+ * Enable the regulators at probe and disable them at remove; we
+ * need to keep the chip powered, otherwise it forgets its firmware.
+ */
+ error = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, silead_disable_regulator, data);
+ if (error)
+ return error;
+
/* Power GPIO pin */
data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
if (IS_ERR(data->gpio_power)) {
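
silead_ts_probe() above keeps the vddio/avdd supplies enabled for the device's lifetime and leaves the teardown to devm_add_action_or_reset(). A minimal, generic sketch of that pattern follows; example_disable() and example_probe() are hypothetical names, not part of the driver.

static void example_disable(void *arg)
{
        struct regulator_bulk_data *supplies = arg;

        regulator_bulk_disable(2, supplies);
}

static int example_probe(struct device *dev,
                         struct regulator_bulk_data *supplies)
{
        int error;

        supplies[0].supply = "vddio";
        supplies[1].supply = "avdd";
        error = devm_regulator_bulk_get(dev, 2, supplies);
        if (error)
                return error;

        error = regulator_bulk_enable(2, supplies);
        if (error)
                return error;

        /*
         * If registration fails the action runs immediately; otherwise it
         * runs automatically when the device is unbound (or when a later
         * probe step fails), so no explicit .remove handling is needed.
         */
        return devm_add_action_or_reset(dev, example_disable, supplies);
}
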
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3c26cc..83cf11312fd9 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata;
+ wm->battery_dev->dev.platform_data = pdata->batt_pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 754595ee11b6..3ef0f42984f2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
if (!entry->group)
entry->group = generic_device_group(dev);
+ else
+ iommu_group_ref_get(entry->group);
return entry->group;
}
@@ -1021,7 +1023,7 @@ again:
next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
left = (head - next_tail) % CMD_BUFFER_SIZE;
- if (left <= 2) {
+ if (left <= 0x20) {
struct iommu_cmd sync_cmd;
int ret;
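
The command-queue hunk above raises the free-space reserve from 2 to 0x20 bytes before a completion-wait is forced, presumably so there is always room for the sync command itself on top of the command being queued. The free-space test relies on standard power-of-two ring arithmetic; a runnable illustration with made-up sizes:

#include <assert.h>

#define BUF_SIZE 4096u          /* illustrative power-of-two ring size    */
#define CMD_SIZE 16u            /* illustrative fixed-size command entry  */

/* Bytes left between the slot after tail and head, modulo the ring size. */
static unsigned int ring_space(unsigned int head, unsigned int tail)
{
        unsigned int next_tail = (tail + CMD_SIZE) % BUF_SIZE;

        return (head - next_tail) % BUF_SIZE;
}

int main(void)
{
        /* Empty ring: nearly the whole buffer is available. */
        assert(ring_space(0, 0) == BUF_SIZE - CMD_SIZE);

        /* Tail almost caught up with head: only one slot of slack left,
         * so the driver would issue a completion-wait before continuing. */
        assert(ring_space(64, 32) == 16);
        return 0;
}
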
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 157e93421fb8..6799cf9713f7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -28,6 +28,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
+#include <linux/kmemleak.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -2090,6 +2091,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_on_init_error(void)
{
+ kmemleak_free(irq_lookup_table);
free_pages((unsigned long)irq_lookup_table,
get_order(rlookup_table_size));
@@ -2207,14 +2209,13 @@ static void __init free_dma_resources(void)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
int i, remap_cache_sz, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return -ENODEV;
else if (ACPI_FAILURE(status)) {
@@ -2321,6 +2322,8 @@ static int __init early_amd_iommu_init(void)
irq_lookup_table = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
get_order(rlookup_table_size));
+ kmemleak_alloc(irq_lookup_table, rlookup_table_size,
+ 1, GFP_KERNEL);
if (!irq_lookup_table)
goto out;
}
@@ -2334,7 +2337,7 @@ static int __init early_amd_iommu_init(void)
out:
/* Don't leak any ACPI memory */
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
ivrs_base = NULL;
return ret;
@@ -2358,10 +2361,9 @@ out:
static bool detect_ivrs(void)
{
struct acpi_table_header *ivrs_base;
- acpi_size ivrs_size;
acpi_status status;
- status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ status = acpi_get_table("IVRS", 0, &ivrs_base);
if (status == AE_NOT_FOUND)
return false;
else if (ACPI_FAILURE(status)) {
@@ -2370,7 +2372,7 @@ static bool detect_ivrs(void)
return false;
}
- early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ acpi_put_table(ivrs_base);
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
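
irq_lookup_table is allocated with __get_free_pages(), which kmemleak cannot see on its own, so the init path above registers the allocation by hand and drops it again on the error path before the pages are freed. Reduced to the bare pattern (table and table_size are placeholders):

        table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         get_order(table_size));
        /* Tell kmemleak about memory it cannot track itself; min_count = 1
         * means at least one reference is expected, otherwise it is reported. */
        kmemleak_alloc(table, table_size, 1, GFP_KERNEL);

        /* error path: unregister before the pages are returned */
        kmemleak_free(table);
        free_pages((unsigned long)table, get_order(table_size));
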
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 594849a3a9be..f8ed8c95b685 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -805,8 +805,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
goto out_free_domain;
group = iommu_group_get(&pdev->dev);
- if (!group)
+ if (!group) {
+ ret = -EINVAL;
goto out_free_domain;
+ }
ret = iommu_attach_group(dev_state->domain, group);
if (ret != 0)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d745ca..4d6ec444a9d6 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -20,6 +20,8 @@
* This driver is powered by bad coffee and bombay mix.
*/
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -1358,7 +1360,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
} while (size -= granule);
}
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@@ -1723,13 +1725,14 @@ static struct platform_driver arm_smmu_driver;
static int arm_smmu_match_node(struct device *dev, void *data)
{
- return dev->of_node == data;
+ return dev->fwnode == data;
}
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- np, arm_smmu_match_node);
+ fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1765,7 +1768,7 @@ static int arm_smmu_add_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
} else {
- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
return -ENODEV;
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -2380,10 +2383,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0;
}
-static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
+static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
u32 reg;
- bool coherent;
+ bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
/* IDR0 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2435,13 +2438,9 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_HYP;
/*
- * The dma-coherent property is used in preference to the ID
+ * The coherency feature as set by FW is used in preference to the ID
* register, but warn on mismatch.
*/
- coherent = of_dma_is_coherent(smmu->dev->of_node);
- if (coherent)
- smmu->features |= ARM_SMMU_FEAT_COHERENCY;
-
if (!!(reg & IDR0_COHACC) != coherent)
dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
coherent ? "true" : "false");
@@ -2562,21 +2561,61 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
return 0;
}
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct acpi_iort_smmu_v3 *iort_smmu;
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node;
+
+ node = *(struct acpi_iort_node **)dev_get_platdata(dev);
+
+ /* Retrieve SMMUv3 specific data */
+ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
{
- int irq, ret;
- struct resource *res;
- struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- bool bypass = true;
u32 cells;
+ int ret = -EINVAL;
if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
dev_err(dev, "missing #iommu-cells property\n");
else if (cells != 1)
dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
else
- bypass = false;
+ ret = 0;
+
+ parse_driver_options(smmu);
+
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+ return ret;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ bool bypass;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2613,10 +2652,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (irq > 0)
smmu->gerr_irq = irq;
- parse_driver_options(smmu);
+ if (dev->of_node) {
+ ret = arm_smmu_device_dt_probe(pdev, smmu);
+ } else {
+ ret = arm_smmu_device_acpi_probe(pdev, smmu);
+ if (ret == -ENODEV)
+ return ret;
+ }
+
+ /* Set bypass mode according to firmware probing result */
+ bypass = !!ret;
/* Probe the h/w */
- ret = arm_smmu_device_probe(smmu);
+ ret = arm_smmu_device_hw_probe(smmu);
if (ret)
return ret;
@@ -2634,7 +2682,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
pci_request_acs();
@@ -2677,7 +2726,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu-v3",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
- .probe = arm_smmu_device_dt_probe,
+ .probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@@ -2715,6 +2764,17 @@ static int __init arm_smmu_of_init(struct device_node *np)
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
+#ifdef CONFIG_ACPI
+static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
+{
+ if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
+ return arm_smmu_init();
+
+ return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8f7281444551..a60cded8a6ed 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,6 +28,8 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
@@ -247,6 +249,7 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define CB_PAR_F (1 << 0)
@@ -642,7 +645,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
}
}
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
@@ -1379,13 +1382,14 @@ static bool arm_smmu_capable(enum iommu_cap cap)
static int arm_smmu_match_node(struct device *dev, void *data)
{
- return dev->of_node == data;
+ return dev->fwnode == data;
}
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- np, arm_smmu_match_node);
+ fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1403,7 +1407,7 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
return -ENODEV;
}
@@ -1478,7 +1482,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}
if (group)
- return group;
+ return iommu_group_ref_get(group);
if (dev_is_pci(dev))
group = pci_device_group(dev);
@@ -1581,16 +1585,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
- /*
- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
- * bit is only present in MMU-500r2 onwards.
- */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+ if (smmu->model == ARM_MMU500) {
+ /*
+ * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+ * bit is only present in MMU-500r2 onwards.
+ */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
@@ -1667,7 +1677,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
- bool cttw_dt, cttw_reg;
+ bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
dev_notice(smmu->dev, "probing hardware configuration...\n");
@@ -1712,20 +1722,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/*
* In order for DMA API calls to work properly, we must defer to what
- * the DT says about coherency, regardless of what the hardware claims.
+ * the FW says about coherency, regardless of what the hardware claims.
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
- cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
cttw_reg = !!(id & ID0_CTTW);
- if (cttw_dt)
- smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
- if (cttw_dt || cttw_reg)
+ if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
- cttw_dt ? "" : "non-");
- if (cttw_dt != cttw_reg)
+ cttw_fw ? "" : "non-");
+ if (cttw_fw != cttw_reg)
dev_notice(smmu->dev,
- "\t(IDR0.CTTW overridden by dma-coherent property)\n");
+ "\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
@@ -1906,15 +1913,83 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
+{
+ int ret = 0;
+
+ switch (model) {
+ case ACPI_IORT_SMMU_V1:
+ case ACPI_IORT_SMMU_CORELINK_MMU400:
+ smmu->version = ARM_SMMU_V1;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_V2:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = GENERIC_SMMU;
+ break;
+ case ACPI_IORT_SMMU_CORELINK_MMU500:
+ smmu->version = ARM_SMMU_V2;
+ smmu->model = ARM_MMU500;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ struct device *dev = smmu->dev;
+ struct acpi_iort_node *node =
+ *(struct acpi_iort_node **)dev_get_platdata(dev);
+ struct acpi_iort_smmu *iort_smmu;
+ int ret;
+
+ /* Retrieve SMMU1/2 specific data */
+ iort_smmu = (struct acpi_iort_smmu *)node->node_data;
+
+ ret = acpi_smmu_get_data(iort_smmu->model, smmu);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore the configuration access interrupt */
+ smmu->num_global_irqs = 1;
+
+ if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+ struct arm_smmu_device *smmu)
{
const struct arm_smmu_match_data *data;
- struct resource *res;
- struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- int num_irqs, i, err;
bool legacy_binding;
+ if (of_property_read_u32(dev->of_node, "#global-interrupts",
+ &smmu->num_global_irqs)) {
+ dev_err(dev, "missing #global-interrupts property\n");
+ return -ENODEV;
+ }
+
+ data = of_device_get_match_data(dev);
+ smmu->version = data->version;
+ smmu->model = data->model;
+
+ parse_driver_options(smmu);
+
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding)
@@ -1927,6 +2002,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (of_dma_is_coherent(dev->of_node))
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+ return 0;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ int num_irqs, i, err;
+
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate arm_smmu_device\n");
@@ -1934,9 +2022,13 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
smmu->dev = dev;
- data = of_device_get_match_data(dev);
- smmu->version = data->version;
- smmu->model = data->model;
+ if (dev->of_node)
+ err = arm_smmu_device_dt_probe(pdev, smmu);
+ else
+ err = arm_smmu_device_acpi_probe(pdev, smmu);
+
+ if (err)
+ return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
smmu->base = devm_ioremap_resource(dev, res);
@@ -1944,12 +2036,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
return PTR_ERR(smmu->base);
smmu->size = resource_size(res);
- if (of_property_read_u32(dev->of_node, "#global-interrupts",
- &smmu->num_global_irqs)) {
- dev_err(dev, "missing #global-interrupts property\n");
- return -ENODEV;
- }
-
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@@ -1984,8 +2070,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
return err;
- parse_driver_options(smmu);
-
if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
@@ -2007,7 +2091,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
}
- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+ iommu_register_instance(dev->fwnode, &arm_smmu_ops);
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
@@ -2047,7 +2131,7 @@ static struct platform_driver arm_smmu_driver = {
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
- .probe = arm_smmu_device_dt_probe,
+ .probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
@@ -2090,6 +2174,17 @@ IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+#ifdef CONFIG_ACPI
+static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
+{
+ if (iort_node_match(ACPI_IORT_NODE_SMMU))
+ return arm_smmu_init();
+
+ return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c5ab8667e6f2..2db0d641cf45 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -432,13 +432,12 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
return ret;
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot)
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ size_t size, int prot)
{
dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iova_domain *iovad = cookie_iovad(domain);
- phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
@@ -454,6 +453,12 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return dma_addr + iova_off;
}
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, int prot)
+{
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+}
+
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
@@ -624,6 +629,19 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return __iommu_dma_map(dev, phys, size,
+ dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+}
+
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+}
+
int iommu_dma_supported(struct device *dev, u64 mask)
{
/*
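
iommu_dma_map_resource()/iommu_dma_unmap_resource() added above let IOMMU-backed DMA ops implement dma_map_resource(), i.e. map a raw physical (typically MMIO) range with IOMMU_MMIO protection and no struct page behind it. From a driver's point of view the call looks roughly like this (dev and fifo_phys are placeholders for a real device and its slave FIFO address):

        dma_addr_t dma;

        /* Map a slave device's MMIO FIFO so a DMA engine can target it. */
        dma = dma_map_resource(dev, fifo_phys, SZ_4K, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program the DMA engine with "dma" as its destination ... */

        dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
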
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8c53748a769d..8ccbd7023194 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -68,7 +68,6 @@ DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
@@ -543,9 +542,7 @@ static int __init dmar_table_detect(void)
acpi_status status = AE_OK;
/* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl,
- &dmar_tbl_size);
+ status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
pr_warn("Unable to map DMAR\n");
@@ -906,8 +903,10 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
- dmar_tbl = NULL;
+ if (dmar_tbl) {
+ acpi_put_table(dmar_tbl);
+ dmar_tbl = NULL;
+ }
up_write(&dmar_global_lock);
return ret ? 1 : -ENODEV;
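
This hunk, like the IVRS changes in the AMD IOMMU init code above, switches from acpi_get_table_with_size()/early_acpi_os_unmap_memory() to the reference-counted acpi_get_table()/acpi_put_table() pair, so callers no longer have to carry the mapping size around. The resulting usage pattern, sketched for the DMAR signature:

        struct acpi_table_header *tbl;
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_DMAR, 0, &tbl);
        if (ACPI_FAILURE(status) || !tbl)
                return -ENODEV;

        /* ... parse the table while the reference is held ... */

        acpi_put_table(tbl);            /* drop the reference; no size to track */
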
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 30808e91b775..57ba0d3091ea 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -70,6 +70,36 @@ static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
+static const sysmmu_pte_t *LV1_PROT;
+static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
+ ((0 << 15) | (0 << 10)), /* no access */
+ ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
+ ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
+ ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
+ (0 << 4), /* no access */
+ (1 << 4), /* IOMMU_READ only */
+ (2 << 4), /* IOMMU_WRITE only */
+ (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+static const sysmmu_pte_t *LV2_PROT;
+static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
+ ((0 << 9) | (0 << 4)), /* no access */
+ ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
+ ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
+ ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
+ (0 << 2), /* no access */
+ (1 << 2), /* IOMMU_READ only */
+ (2 << 2), /* IOMMU_WRITE only */
+ (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
+
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
@@ -97,16 +127,17 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
-#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
+#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define CFG_LRU 0x1
+#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
@@ -206,6 +237,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
struct exynos_iommu_owner {
struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
struct iommu_domain *domain; /* domain this device is attached */
+ struct mutex rpm_lock; /* for runtime pm of all sysmmus */
};
/*
@@ -237,8 +269,8 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
- int activations; /* number of calls to sysmmu_enable */
spinlock_t lock; /* lock for modifying state */
+ bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
struct list_head owner_node; /* node for owner controllers list */
@@ -251,25 +283,6 @@ static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
return container_of(dom, struct exynos_iommu_domain, domain);
}
-static bool set_sysmmu_active(struct sysmmu_drvdata *data)
-{
- /* return true if the System MMU was not active previously
- and it needs to be initialized */
- return ++data->activations == 1;
-}
-
-static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
-{
- /* return true if the System MMU is needed to be disabled */
- BUG_ON(data->activations < 1);
- return --data->activations == 0;
-}
-
-static bool is_sysmmu_active(struct sysmmu_drvdata *data)
-{
- return data->activations > 0;
-}
-
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
@@ -388,7 +401,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
- WARN_ON(!is_sysmmu_active(data));
+ WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
reg_status = REG_INT_STATUS;
@@ -434,40 +447,19 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
+ unsigned long flags;
+
clk_enable(data->clk_master);
+ spin_lock_irqsave(&data->lock, flags);
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
-
- __sysmmu_disable_clocks(data);
-}
-
-static bool __sysmmu_disable(struct sysmmu_drvdata *data)
-{
- bool disabled;
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- disabled = set_sysmmu_inactive(data);
-
- if (disabled) {
- data->pgtable = 0;
- data->domain = NULL;
-
- __sysmmu_disable_nocount(data);
-
- dev_dbg(data->sysmmu, "Disabled\n");
- } else {
- dev_dbg(data->sysmmu, "%d times left to disable\n",
- data->activations);
- }
-
+ data->active = false;
spin_unlock_irqrestore(&data->lock, flags);
- return disabled;
+ __sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
@@ -481,20 +473,24 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
else
cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
+ cfg |= CFG_EAP; /* enable access protection bits check */
+
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
-static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
+ unsigned long flags;
+
__sysmmu_enable_clocks(data);
+ spin_lock_irqsave(&data->lock, flags);
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
-
__sysmmu_init_config(data);
-
__sysmmu_set_ptbase(data, data->pgtable);
-
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ data->active = true;
+ spin_unlock_irqrestore(&data->lock, flags);
/*
* SYSMMU driver keeps master's clock enabled only for the short
@@ -505,48 +501,18 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
clk_disable(data->clk_master);
}
-static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
- struct exynos_iommu_domain *domain)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
- if (set_sysmmu_active(data)) {
- data->pgtable = pgtable;
- data->domain = domain;
-
- __sysmmu_enable_nocount(data);
-
- dev_dbg(data->sysmmu, "Enabled\n");
- } else {
- ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
-
- dev_dbg(data->sysmmu, "already enabled\n");
- }
-
- if (WARN_ON(ret < 0))
- set_sysmmu_inactive(data); /* decrement count */
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return ret;
-}
-
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
sysmmu_iova_t iova)
{
unsigned long flags;
-
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+ if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
clk_enable(data->clk_master);
__sysmmu_tlb_invalidate_entry(data, iova, 1);
clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
-
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -555,7 +521,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data)) {
+ if (data->active) {
unsigned int num_inv = 1;
clk_enable(data->clk_master);
@@ -578,9 +544,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
sysmmu_unblock(data);
}
clk_disable(data->clk_master);
- } else {
- dev_dbg(data->master,
- "disabled. Skipping TLB invalidation @ %#x\n", iova);
}
spin_unlock_irqrestore(&data->lock, flags);
}
@@ -652,10 +615,15 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
__sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
- if (MMU_MAJ_VER(data->version) < 5)
+ if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
- else
+ LV1_PROT = SYSMMU_LV1_PROT;
+ LV2_PROT = SYSMMU_LV2_PROT;
+ } else {
PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+ LV1_PROT = SYSMMU_V5_LV1_PROT;
+ LV2_PROT = SYSMMU_V5_LV2_PROT;
+ }
}
pm_runtime_enable(dev);
@@ -665,34 +633,46 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_sysmmu_suspend(struct device *dev)
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = master->archdata.iommu;
- dev_dbg(dev, "suspend\n");
- if (is_sysmmu_active(data)) {
- __sysmmu_disable_nocount(data);
- pm_runtime_put(dev);
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "saving state\n");
+ __sysmmu_disable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
}
return 0;
}
-static int exynos_sysmmu_resume(struct device *dev)
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = master->archdata.iommu;
- dev_dbg(dev, "resume\n");
- if (is_sysmmu_active(data)) {
- pm_runtime_get_sync(dev);
- __sysmmu_enable_nocount(data);
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "restoring state\n");
+ __sysmmu_enable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
}
return 0;
}
-#endif
static const struct dev_pm_ops sysmmu_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
+ SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id sysmmu_of_match[] __initconst = {
@@ -796,9 +776,12 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (__sysmmu_disable(data))
- data->master = NULL;
+ spin_lock(&data->lock);
+ __sysmmu_disable(data);
+ data->pgtable = 0;
+ data->domain = NULL;
list_del_init(&data->domain_node);
+ spin_unlock(&data->lock);
}
spin_unlock_irqrestore(&domain->lock, flags);
@@ -832,31 +815,34 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
- bool found = false;
if (!has_sysmmu(dev) || owner->domain != iommu_domain)
return;
+ mutex_lock(&owner->rpm_lock);
+
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ pm_runtime_get_noresume(data->sysmmu);
+ if (pm_runtime_active(data->sysmmu))
+ __sysmmu_disable(data);
+ pm_runtime_put(data->sysmmu);
+ }
+
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (data->master == dev) {
- if (__sysmmu_disable(data)) {
- data->master = NULL;
- list_del_init(&data->domain_node);
- }
- pm_runtime_put(data->sysmmu);
- found = true;
- }
+ spin_lock(&data->lock);
+ data->pgtable = 0;
+ data->domain = NULL;
+ list_del_init(&data->domain_node);
+ spin_unlock(&data->lock);
}
+ owner->domain = NULL;
spin_unlock_irqrestore(&domain->lock, flags);
- owner->domain = NULL;
+ mutex_unlock(&owner->rpm_lock);
- if (found)
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- else
- dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
+ &pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
@@ -867,7 +853,6 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
- int ret = -ENODEV;
if (!has_sysmmu(dev))
return -ENODEV;
@@ -875,29 +860,32 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
if (owner->domain)
exynos_iommu_detach_device(owner->domain, dev);
+ mutex_lock(&owner->rpm_lock);
+
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(data, &owner->controllers, owner_node) {
- pm_runtime_get_sync(data->sysmmu);
- ret = __sysmmu_enable(data, pagetable, domain);
- if (ret >= 0) {
- data->master = dev;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_add_tail(&data->domain_node, &domain->clients);
- spin_unlock_irqrestore(&domain->lock, flags);
- }
+ spin_lock(&data->lock);
+ data->pgtable = pagetable;
+ data->domain = domain;
+ list_add_tail(&data->domain_node, &domain->clients);
+ spin_unlock(&data->lock);
}
+ owner->domain = iommu_domain;
+ spin_unlock_irqrestore(&domain->lock, flags);
- if (ret < 0) {
- dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- return ret;
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ pm_runtime_get_noresume(data->sysmmu);
+ if (pm_runtime_active(data->sysmmu))
+ __sysmmu_enable(data);
+ pm_runtime_put(data->sysmmu);
}
- owner->domain = iommu_domain;
- dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
- __func__, &pagetable, (ret == 0) ? "" : ", again");
+ mutex_unlock(&owner->rpm_lock);
- return ret;
+ dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
+ &pagetable);
+
+ return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
@@ -954,7 +942,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
static int lv1set_section(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova,
- phys_addr_t paddr, short *pgcnt)
+ phys_addr_t paddr, int prot, short *pgcnt)
{
if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
@@ -973,7 +961,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- update_pte(sent, mk_lv1ent_sect(paddr));
+ update_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -991,13 +979,13 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
- short *pgcnt)
+ int prot, short *pgcnt)
{
if (size == SPAGE_SIZE) {
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- update_pte(pent, mk_lv2ent_spage(paddr));
+ update_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@@ -1013,7 +1001,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
return -EADDRINUSE;
}
- *pent = mk_lv2ent_lpage(paddr);
+ *pent = mk_lv2ent_lpage(paddr, prot);
}
dma_sync_single_for_device(dma_dev, pent_base,
sizeof(*pent) * SPAGES_PER_LPAGE,
@@ -1061,13 +1049,14 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
int ret = -ENOMEM;
BUG_ON(domain->pgtable == NULL);
+ prot &= SYSMMU_SUPPORTED_PROT_BITS;
spin_lock_irqsave(&domain->pgtablelock, flags);
entry = section_entry(domain->pgtable, iova);
if (size == SECT_SIZE) {
- ret = lv1set_section(domain, entry, iova, paddr,
+ ret = lv1set_section(domain, entry, iova, paddr, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
} else {
sysmmu_pte_t *pent;
@@ -1078,7 +1067,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
if (IS_ERR(pent))
ret = PTR_ERR(pent);
else
- ret = lv2set_page(pent, paddr, size,
+ ret = lv2set_page(pent, paddr, size, prot,
&domain->lv2entcnt[lv1ent_offset(iova)]);
}
@@ -1268,10 +1257,20 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&owner->controllers);
+ mutex_init(&owner->rpm_lock);
dev->archdata.iommu = owner;
}
list_add_tail(&data->owner_node, &owner->controllers);
+ data->master = dev;
+
+ /*
+ * SYSMMU will be runtime activated via device link (dependency) to its
+ * master device, so there are no direct calls to pm_runtime_get/put
+ * in this driver.
+ */
+ device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
+
return 0;
}
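The device_link_add() call above ties the SysMMU's runtime PM state to its master device. A minimal sketch of that idea in isolation; master and sysmmu are placeholders for the two struct device pointers:

struct device_link *link;

/* master = consumer, sysmmu = supplier: resuming the master
 * now also runtime-resumes the SysMMU, so the IOMMU driver
 * itself never calls pm_runtime_get/put directly. */
link = device_link_add(master, sysmmu, DL_FLAG_PM_RUNTIME);
if (!link)
	dev_warn(master, "failed to link to %s\n", dev_name(sysmmu));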
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c66c273dfd8a..8a185250ae5a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2037,6 +2037,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_present(context))
goto out_unlock;
+ /*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry about it
+ * hereafter.
+ */
+ if (context_copied(context)) {
+ u16 did_old = context_domain_id(context);
+
+ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+ iommu->flush.flush_context(iommu, did_old,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ }
+
pgd = domain->pgd;
context_clear_entry(context);
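For reference, the source-id passed to flush_context() above packs the bus number into the high byte and devfn into the low byte. A worked example with a hypothetical device at bus 0x3a, slot 0x1c, function 0:

u8  bus   = 0x3a;
u8  devfn = PCI_DEVFN(0x1c, 0);        /* (0x1c << 3) | 0 = 0xe0 */
u16 sid   = (((u16)bus) << 8) | devfn; /* 0x3ae0 */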
@@ -5185,6 +5204,25 @@ static void intel_iommu_remove_device(struct device *dev)
}
#ifdef CONFIG_INTEL_IOMMU_SVM
+#define MAX_NR_PASID_BITS (20)
+static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+{
+ /*
+ * Convert ecap_pss to the extended context entry pts encoding, also
+ * respect the soft pasid_max value set by the iommu.
+ * - number of PASID bits = ecap_pss + 1
+ * - number of PASID table entries = 2^(pts + 5)
+ * Therefore, pts = ecap_pss - 4
+ * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
+ */
+ if (ecap_pss(iommu->ecap) < 5)
+ return 0;
+
+ /* pasid_max is encoded as actual number of entries not the bits */
+ return find_first_bit((unsigned long *)&iommu->pasid_max,
+ MAX_NR_PASID_BITS) - 5;
+}
+
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
struct device_domain_info *info;
@@ -5217,7 +5255,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!(ctx_lo & CONTEXT_PASIDE)) {
context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
- context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+ context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
+ intel_iommu_get_pts(iommu);
+
wmb();
/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
* extended to permit requests-with-PASID if the PASIDE bit
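The pts arithmetic in intel_iommu_get_pts() can be checked in isolation: find_first_bit() over pasid_max (a power of two) is simply the index of its lowest set bit. A standalone check using the KBL numbers quoted in the comment:

#include <stdio.h>

int main(void)
{
	unsigned int ecap_pss = 0x13;
	unsigned int pasid_bits = ecap_pss + 1;           /* 20 */
	unsigned long pasid_max = 1UL << pasid_bits;      /* 2^20 table entries */
	unsigned int pts = __builtin_ctzl(pasid_max) - 5; /* lowest set bit - 5 */

	/* prints: pasid bits 20, pts 15 (ecap_pss - 4 = 15) */
	printf("pasid bits %u, pts %u (ecap_pss - 4 = %u)\n",
	       pasid_bits, pts, ecap_pss - 4);
	return 0;
}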
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f50e51c1a9c8..0769276c0537 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -793,8 +793,7 @@ static int __init arm_v7s_do_selftests(void)
* Distinct mappings of different granule sizes.
*/
iova = 0;
- i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
- while (i != BITS_PER_LONG) {
+ for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
@@ -811,8 +810,6 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
iova += SZ_16M;
- i++;
- i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
loopnr++;
}
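Both selftest hunks swap the manual find_first_bit()/find_next_bit() loop for the for_each_set_bit() helper from <linux/bitops.h>, which hides the increment-and-search at the loop tail. The same pattern in isolation, over an example page-size bitmap:

unsigned long granules = SZ_4K | SZ_2M | SZ_1G; /* example pgsize bitmap */
unsigned int i;

for_each_set_bit(i, &granules, BITS_PER_LONG)
	pr_info("testing granule %lu\n", 1UL << i);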
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f5c90e1366ce..a40ce3406fef 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -916,7 +916,7 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
@@ -980,8 +980,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
* Distinct mappings of different granule sizes.
*/
iova = 0;
- j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
- while (j != BITS_PER_LONG) {
+ for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
@@ -999,8 +998,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
iova += SZ_1G;
- j++;
- j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
/* Partial unmap */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9a2f1960873b..dbe7f653bb7c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -552,6 +552,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group. Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+ kobject_get(group->devices_kobj);
+ return group;
+}
+
+/**
* iommu_group_put - Decrement group reference
* @group: the group to use
*
@@ -1615,6 +1628,46 @@ out:
return ret;
}
+struct iommu_instance {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ const struct iommu_ops *ops;
+};
+static LIST_HEAD(iommu_instance_list);
+static DEFINE_SPINLOCK(iommu_instance_lock);
+
+void iommu_register_instance(struct fwnode_handle *fwnode,
+ const struct iommu_ops *ops)
+{
+ struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+
+ if (WARN_ON(!iommu))
+ return;
+
+ of_node_get(to_of_node(fwnode));
+ INIT_LIST_HEAD(&iommu->list);
+ iommu->fwnode = fwnode;
+ iommu->ops = ops;
+ spin_lock(&iommu_instance_lock);
+ list_add_tail(&iommu->list, &iommu_instance_list);
+ spin_unlock(&iommu_instance_lock);
+}
+
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+{
+ struct iommu_instance *instance;
+ const struct iommu_ops *ops = NULL;
+
+ spin_lock(&iommu_instance_lock);
+ list_for_each_entry(instance, &iommu_instance_list, list)
+ if (instance->fwnode == fwnode) {
+ ops = instance->ops;
+ break;
+ }
+ spin_unlock(&iommu_instance_lock);
+ return ops;
+}
+
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e23001bfcfee..080beca0197d 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(init_iova_domain);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
- if ((*limit_pfn != iovad->dma_32bit_pfn) ||
+ if ((*limit_pfn > iovad->dma_32bit_pfn) ||
(iovad->cached32_node == NULL))
return rb_last(&iovad->rbroot);
else {
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b12c12d74c33..1479c76ece9e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -195,14 +195,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ int i;
- head = dev->archdata.iommu;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
- portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@@ -282,14 +282,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- if (!priv)
+ if (!data)
return -ENODEV;
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@@ -310,13 +308,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- if (!priv)
+ if (!data)
return;
- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@@ -366,8 +362,8 @@ static int mtk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- if (!dev->archdata.iommu) /* Not a iommu client device */
- return -ENODEV;
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@@ -379,44 +375,33 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
-
- head = dev->archdata.iommu;
- if (!head)
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- list_del(&cur->client);
- kfree(cur);
- }
- kfree(head);
- dev->archdata.iommu = NULL;
-
iommu_group_remove_device(dev);
+ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- struct mtk_iommu_data *data;
- struct mtk_iommu_client_priv *priv;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- priv = dev->archdata.iommu;
- if (!priv)
+ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct mtk_iommu_client_priv *head, *priv, *next;
struct platform_device *m4updev;
if (args->args_count != 1) {
@@ -425,38 +410,16 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
- if (!dev->archdata.iommu) {
+ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return -ENOMEM;
-
- dev->archdata.iommu = head;
- INIT_LIST_HEAD(&head->client);
- head->m4udev = &m4updev->dev;
- } else {
- head = dev->archdata.iommu;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- goto err_free_mem;
-
- priv->mtk_m4u_id = args->args[0];
- list_add_tail(&priv->client, &head->client);
-
- return 0;
-
-err_free_mem:
- list_for_each_entry_safe(priv, next, &head->client, client)
- kfree(priv);
- kfree(head);
- dev->archdata.iommu = NULL;
- return -ENOMEM;
+ return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops mtk_iommu_ops = {
@@ -583,17 +546,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larbnode);
- of_node_put(larbnode);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larbnode, NULL,
platform_bus_type.dev_root);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
+ }
}
data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
- component_match_add(dev, &match, compare_of, larbnode);
+ component_match_add_release(dev, &match, release_of,
+ compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
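After this conversion, per-master state lives in dev->iommu_fwspec rather than a driver-private client list: of_xlate() records the port IDs and later code simply walks fwspec->ids[]. A minimal sketch of that pattern; the function names are placeholders:

#include <linux/iommu.h>
#include <linux/of.h>

static int example_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	/* one port ID per phandle argument, stored by the IOMMU core */
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void example_walk_ids(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i;

	for (i = 0; i < fwspec->num_ids; i++)
		dev_dbg(dev, "configuring port %u\n", fwspec->ids[i]);
}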
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 3dab13b4a211..50177f738e4e 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -34,12 +34,6 @@ struct mtk_iommu_suspend_reg {
u32 int_main_control;
};
-struct mtk_iommu_client_priv {
- struct list_head client;
- unsigned int mtk_m4u_id;
- struct device *m4udev;
-};
-
struct mtk_iommu_domain;
struct mtk_iommu_data {
@@ -60,6 +54,11 @@ static inline int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+static inline void release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb0768483..19e010083408 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -204,14 +204,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ int i;
- head = dev->archdata.iommu;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
- portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
+ portid = mt2701_m4u_to_port(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
@@ -271,14 +271,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- if (!priv)
+ if (!data)
return -ENODEV;
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
@@ -295,13 +293,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- if (!priv)
+ if (!data)
return;
- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
@@ -366,6 +362,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
+static struct iommu_ops mtk_iommu_ops;
+
/*
* MTK generation one iommu HW only supports one iommu domain, and all the clients
* share the same iova address space.
@@ -373,7 +371,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_create_mapping(struct device *dev,
struct of_phandle_args *args)
{
- struct mtk_iommu_client_priv *head, *priv, *next;
+ struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
struct device *m4udev;
@@ -385,41 +383,37 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
- if (!dev->archdata.iommu) {
+ if (!dev->iommu_fwspec) {
+ ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+ if (ret)
+ return ret;
+ } else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
+ return -EINVAL;
+ }
+
+ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return -ENOMEM;
-
- dev->archdata.iommu = head;
- INIT_LIST_HEAD(&head->client);
- head->m4udev = &m4updev->dev;
- } else {
- head = dev->archdata.iommu;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_free_mem;
- }
- priv->mtk_m4u_id = args->args[0];
- list_add_tail(&priv->client, &head->client);
+ ret = iommu_fwspec_add_ids(dev, args->args, 1);
+ if (ret)
+ return ret;
- m4udev = head->m4udev;
+ data = dev->iommu_fwspec->iommu_priv;
+ m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
0, 1ULL << 32);
- if (IS_ERR(mtk_mapping)) {
- ret = PTR_ERR(mtk_mapping);
- goto err_free_mem;
- }
+ if (IS_ERR(mtk_mapping))
+ return PTR_ERR(mtk_mapping);
+
m4udev->archdata.iommu = mtk_mapping;
}
@@ -432,11 +426,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
err_release_mapping:
arm_iommu_release_mapping(mtk_mapping);
m4udev->archdata.iommu = NULL;
-err_free_mem:
- list_for_each_entry_safe(priv, next, &head->client, client)
- kfree(priv);
- kfree(head);
- dev->archdata.iommu = NULL;
return ret;
}
@@ -458,8 +447,8 @@ static int mtk_iommu_add_device(struct device *dev)
of_node_put(iommu_spec.np);
}
- if (!dev->archdata.iommu) /* Not a iommu client device */
- return -ENODEV;
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
@@ -471,37 +460,27 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
- struct mtk_iommu_client_priv *head, *cur, *next;
-
- head = dev->archdata.iommu;
- if (!head)
+ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- list_for_each_entry_safe(cur, next, &head->client, client) {
- list_del(&cur->client);
- kfree(cur);
- }
- kfree(head);
- dev->archdata.iommu = NULL;
-
iommu_group_remove_device(dev);
+ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- struct mtk_iommu_data *data;
- struct mtk_iommu_client_priv *priv;
+ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- priv = dev->archdata.iommu;
- if (!priv)
+ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
@@ -624,17 +603,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
continue;
plarbdev = of_find_device_by_node(larb_spec.np);
- of_node_put(larb_spec.np);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larb_spec.np, NULL,
platform_bus_type.dev_root);
- if (!plarbdev)
+ if (!plarbdev) {
+ of_node_put(larb_spec.np);
return -EPROBE_DEFER;
+ }
}
data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
- component_match_add(dev, &match, compare_of, larb_spec.np);
+ component_match_add_release(dev, &match, release_of,
+ compare_of, larb_spec.np);
larb_nr++;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5b82862f571f..0f57ddc4ecc2 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -96,45 +96,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
-struct of_iommu_node {
- struct list_head list;
- struct device_node *np;
- const struct iommu_ops *ops;
-};
-static LIST_HEAD(of_iommu_list);
-static DEFINE_SPINLOCK(of_iommu_lock);
-
-void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
-{
- struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
- if (WARN_ON(!iommu))
- return;
-
- of_node_get(np);
- INIT_LIST_HEAD(&iommu->list);
- iommu->np = np;
- iommu->ops = ops;
- spin_lock(&of_iommu_lock);
- list_add_tail(&iommu->list, &of_iommu_list);
- spin_unlock(&of_iommu_lock);
-}
-
-const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
- struct of_iommu_node *node;
- const struct iommu_ops *ops = NULL;
-
- spin_lock(&of_iommu_lock);
- list_for_each_entry(node, &of_iommu_list, list)
- if (node->np == np) {
- ops = node->ops;
- break;
- }
- spin_unlock(&of_iommu_lock);
- return ops;
-}
-
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
struct of_phandle_args *iommu_spec = data;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b44b1d82f3b..179e636a4d91 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -8,7 +8,6 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
-#include <linux/pci.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 8bcee65a0b8c..eb0d4d41b156 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -578,13 +578,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
set_smp_cross_call(armada_mpic_send_doorbell);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
- "AP_IRQ_ARMADA_XP_STARTING",
+ "irqchip/armada/ipi:starting",
armada_xp_mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
- "AP_IRQ_ARMADA_CASC_STARTING",
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "irqchip/armada/cascade:starting",
mpic_cascaded_starting_cpu, NULL);
#endif
irq_set_chained_handler(parent_irq,
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index d96b2c947e74..e7463e3c0814 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -245,7 +245,7 @@ bcm2836_arm_irqchip_smp_init(void)
#ifdef CONFIG_SMP
/* Unmask IPIs to the boot CPU. */
cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
- "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+ "irqchip/bcm2836:starting", bcm2836_cpu_starting,
bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 26e1d7fafb1e..c132f29322cc 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -632,9 +632,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
- "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
- NULL);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gicv3:starting",
+ gic_starting_cpu, NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d6c404b3584d..1b1df4f770bd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1191,7 +1191,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
set_smp_cross_call(gic_raise_softirq);
#endif
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
- "AP_IRQ_GIC_STARTING",
+ "irqchip/arm/gic:starting",
gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 021b0e0833c1..c1b4ee955dbe 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -407,7 +407,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
set_handle_irq(hip04_handle_irq);
hip04_irq_dist_init(&hip04_data);
- cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+ cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
hip04_irq_starting_cpu, NULL);
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c0178a122940..c01c09e9916d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -152,12 +152,12 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
}
#ifdef CONFIG_CLKSRC_MIPS_GIC
-cycle_t gic_read_count(void)
+u64 gic_read_count(void)
{
unsigned int hi, hi2, lo;
if (mips_cm_is64)
- return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+ return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
do {
hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
@@ -165,7 +165,7 @@ cycle_t gic_read_count(void)
hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
} while (hi2 != hi);
- return (((cycle_t) hi) << 32) + lo;
+ return (((u64) hi) << 32) + lo;
}
unsigned int gic_get_count_width(void)
@@ -179,7 +179,7 @@ unsigned int gic_get_count_width(void)
return bits;
}
-void gic_write_compare(cycle_t cnt)
+void gic_write_compare(u64 cnt)
{
if (mips_cm_is64) {
gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -191,7 +191,7 @@ void gic_write_compare(cycle_t cnt)
}
}
-void gic_write_cpu_compare(cycle_t cnt, int cpu)
+void gic_write_cpu_compare(u64 cnt, int cpu)
{
unsigned long flags;
@@ -211,17 +211,17 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
local_irq_restore(flags);
}
-cycle_t gic_read_compare(void)
+u64 gic_read_compare(void)
{
unsigned int hi, lo;
if (mips_cm_is64)
- return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+ return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
- return (((cycle_t) hi) << 32) + lo;
+ return (((u64) hi) << 32) + lo;
}
void gic_start_count(void)
diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c
index 9af48a85c16f..5e0e250db0be 100644
--- a/drivers/irqchip/irq-st.c
+++ b/drivers/irqchip/irq-st.c
@@ -180,7 +180,7 @@ static int st_irq_syscfg_probe(struct platform_device *pdev)
return st_irq_syscfg_enable(pdev);
}
-static int st_irq_syscfg_resume(struct device *dev)
+static int __maybe_unused st_irq_syscfg_resume(struct device *dev)
{
struct st_irq_syscfg *ddata = dev_get_drvdata(dev);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 823f6985b260..49d0f70c2bae 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -28,7 +28,7 @@
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#ifdef AVMB1_COMPAT
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..11e13c56126f 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
static const struct gigaset_ops gigops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_isoc_send_skb,
- gigaset_isoc_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_isoc_send_skb,
+ .handle_input = gigaset_isoc_input,
};
/* bas_gigaset_init
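This and the following gigaset hunks convert positional struct initializers to C99 designated initializers, which bind each callback to its field by name and stay correct if fields are reordered or inserted. A small standalone illustration of the difference:

#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

/* positional form (fragile):  { my_open, my_close } */
/* designated form (robust): */
static const struct ops example_ops = {
	.open  = my_open,
	.close = my_close,
};

int main(void)
{
	printf("open -> %d\n", example_ops.open());
	return 0;
}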
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index b90776ef56ec..ab0b63a4d045 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -445,22 +445,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb, /* asyncdata.c */
- gigaset_m10x_input, /* asyncdata.c */
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
};
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 5f306e2eece5..eade36dafa34 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
}
static const struct gigaset_ops ops = {
- gigaset_write_cmd,
- gigaset_write_room,
- gigaset_chars_in_buffer,
- gigaset_brkchars,
- gigaset_init_bchannel,
- gigaset_close_bchannel,
- gigaset_initbcshw,
- gigaset_freebcshw,
- gigaset_reinitbcshw,
- gigaset_initcshw,
- gigaset_freecshw,
- gigaset_set_modem_ctrl,
- gigaset_baud_rate,
- gigaset_set_line_ctrl,
- gigaset_m10x_send_skb,
- gigaset_m10x_input,
+ .write_cmd = gigaset_write_cmd,
+ .write_room = gigaset_write_room,
+ .chars_in_buffer = gigaset_chars_in_buffer,
+ .brkchars = gigaset_brkchars,
+ .init_bchannel = gigaset_init_bchannel,
+ .close_bchannel = gigaset_close_bchannel,
+ .initbcshw = gigaset_initbcshw,
+ .freebcshw = gigaset_freebcshw,
+ .reinitbcshw = gigaset_reinitbcshw,
+ .initcshw = gigaset_initcshw,
+ .freecshw = gigaset_freecshw,
+ .set_modem_ctrl = gigaset_set_modem_ctrl,
+ .baud_rate = gigaset_baud_rate,
+ .set_line_ctrl = gigaset_set_line_ctrl,
+ .send_skb = gigaset_m10x_send_skb,
+ .handle_input = gigaset_m10x_input,
};
/*
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4d9b195547c5..9fdbd99c7547 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 19b113faeb7b..818bd8f231db 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -23,7 +23,7 @@
#include <linux/gfp.h>
#include <asm/io.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 5d00d72fe482..17beb2869dc1 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index 997d46abf5b2..be36d82004d6 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 0de29b7b712f..72e58bf07577 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "platform.h"
#include "di_defs.h"
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 4103a8c178d7..cb88090f9cea 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -18,7 +18,7 @@
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "platform.h"
#include "di_defs.h"
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 32f34511c416..8b7ad4f1ab01 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 56ce98a4e248..b57efd6ad916 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -16,7 +16,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "platform.h"
#include "debuglib.h"
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..2d12c6ceeb89 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -659,7 +659,7 @@ int jiftime(char *s, long mark)
static u_char tmpbuf[HISAX_STATUS_BUFSIZE];
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt,
va_list args)
{
/* if head == NULL the fmt contains the full info */
@@ -669,23 +669,24 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
u_char *p;
isdn_ctrl ic;
int len;
+ const u_char *data;
if (!cs) {
printk(KERN_WARNING "HiSax: No CardStatus for message");
return;
}
spin_lock_irqsave(&cs->statlock, flags);
- p = tmpbuf;
if (head) {
+ p = tmpbuf;
p += jiftime(p, jiffies);
p += sprintf(p, " %s", head);
p += vsprintf(p, fmt, args);
*p++ = '\n';
*p = 0;
len = p - tmpbuf;
- p = tmpbuf;
+ data = tmpbuf;
} else {
- p = fmt;
+ data = fmt;
len = strlen(fmt);
}
if (len > HISAX_STATUS_BUFSIZE) {
@@ -699,13 +700,12 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
if (i >= len)
i = len;
len -= i;
- memcpy(cs->status_write, p, i);
+ memcpy(cs->status_write, data, i);
cs->status_write += i;
if (cs->status_write > cs->status_end)
cs->status_write = cs->status_buf;
- p += i;
if (len) {
- memcpy(cs->status_write, p, len);
+ memcpy(cs->status_write, data + i, len);
cs->status_write += len;
}
#ifdef KERNELSTACK_DEBUG
@@ -729,7 +729,7 @@ void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt,
}
}
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...)
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...)
{
va_list args;
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 6ead6314e6d2..338d0408b377 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1288,9 +1288,9 @@ int jiftime(char *s, long mark);
int HiSax_command(isdn_ctrl *ic);
int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
__printf(3, 4)
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
+void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...);
__printf(3, 0)
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
+void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args);
void HiSax_reportcard(int cardnr, int sel);
int QuickHex(char *txt, u_char *p, int cnt);
void LogFrame(struct IsdnCardState *cs, u_char *p, int size);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb25d872..298c8dba0321 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
{ 0x8c, "Reason" },
{ 0x8d, "Calling party name" },
{ 0x8e, "Called party name" },
- { 0x8f, "Orignal called name" },
+ { 0x8f, "Original called name" },
{ 0x90, "Redirecting name" },
{ 0x91, "Connected name" },
{ 0x92, "Originating restrictions" },
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c
index eda4741e3f2f..4a0425378f37 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/isdn/hysdn/hysdn_boot.c
@@ -13,7 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "hysdn_defs.h"
#include "hysdn_pof.h"
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 91d57304d4d3..336523ec077c 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
}
struct concap_device_ops isdn_concap_reliable_dl_dops = {
- &isdn_concap_dl_data_req,
- &isdn_concap_dl_connect_req,
- &isdn_concap_dl_disconn_req
+ .data_req = &isdn_concap_dl_data_req,
+ .connect_req = &isdn_concap_dl_connect_req,
+ .disconn_req = &isdn_concap_dl_disconn_req
};
/* The following should better go into a dedicated source file such that
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index 0c5d8de41b23..ba60076e0b95 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
static struct concap_proto_ops ix25_pops = {
- &isdn_x25iface_proto_new,
- &isdn_x25iface_proto_del,
- &isdn_x25iface_proto_restart,
- &isdn_x25iface_proto_close,
- &isdn_x25iface_xmit,
- &isdn_x25iface_receive,
- &isdn_x25iface_connect_ind,
- &isdn_x25iface_disconn_ind
+ .proto_new = &isdn_x25iface_proto_new,
+ .proto_del = &isdn_x25iface_proto_del,
+ .restart = &isdn_x25iface_proto_restart,
+ .close = &isdn_x25iface_proto_close,
+ .encap_and_xmit = &isdn_x25iface_xmit,
+ .data_ind = &isdn_x25iface_receive,
+ .connect_ind = &isdn_x25iface_connect_ind,
+ .disconn_ind = &isdn_x25iface_disconn_ind
};
/* error message helper function */
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 9719caf7437c..a41896468cb3 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -127,7 +127,7 @@ static int __init ledtrig_cpu_init(void)
register_syscore_ops(&ledtrig_cpu_syscore_ops);
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
ledtrig_online_cpu, ledtrig_prepare_down_cpu);
if (ret < 0)
pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 9e385b38debf..ac219045daf7 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <asm/paravirt.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/poll.h>
#include <asm/asm-offsets.h>
#include "lg.h"
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e3abebc912c0..0bc127e9f16a 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -16,7 +16,7 @@
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "lg.h"
/*M:008
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 743253fc638f..d71f6323ac00 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -45,7 +45,7 @@
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/lguest.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include "../lg.h"
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d28690f6e262..5d80810934df 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,7 +102,6 @@ config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
- depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_DISK
help
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index cd35079c8c98..281fa9e6fc1f 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 08edb2c25b60..227869159ac0 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -47,7 +47,7 @@
#include <asm/pmac_feature.h>
#include <asm/smu.h>
#include <asm/sections.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define VERSION "0.7"
#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 91081dcdc272..43b8db2b5445 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -57,7 +57,7 @@
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/time.h>
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index a00ee41f0573..a411c5cb77a1 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -38,7 +38,7 @@
#include <asm/pgtable.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Misc minor number allocated for /dev/pmu */
#define PMU_MINOR 154
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe8a4cb..ceff415f201c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,15 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA_186_SOC
+ help
+ The Tegra HSP driver is used for the interprocessor communication
+ between different remote processors and host processors on Tegra186
+ and later SoCs. Say Y here if you want to have this support.
+ If unsure say N.
+
config XGENE_SLIMPRO_MBOX
tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
depends on ARCH_XGENE
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed8fea9..7dde4f609ae8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index c19dd820ea9b..2aeb034d5fb9 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -60,7 +60,13 @@
#define RING_ENTRY_SIZE sizeof(struct dma64dd)
/* # entries in PDC dma ring */
-#define PDC_RING_ENTRIES 128
+#define PDC_RING_ENTRIES 512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell the
+ * mailbox framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN 15
+
#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER 13
@@ -93,11 +99,9 @@
* Interrupt mask and status definitions. Enable interrupts for tx and rx on
* ring 0
*/
-#define PDC_XMTINT_0 (24 + PDC_RINGSET)
#define PDC_RCVINT_0 (16 + PDC_RINGSET)
-#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0)
#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
-#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_INTMASK (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT 1
#define PDC_LAZY_TIMEOUT 10000
#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
@@ -117,15 +121,16 @@
/*
* Sets the following bits for write to transmit control reg:
- * 0 - XmtEn - enable activity on the tx channel
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_TX_CTL 0x000C0801
+#define PDC_TX_CTL 0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE 0x1
/*
* Sets the following bits for write to receive control reg:
- * 0 - RcvEn - enable activity on the rx channel
* 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
* 9 - SepRxHdrDescEn - place start of new frames only in descriptors
* that have StartOfFrame set
@@ -135,7 +140,10 @@
* 11 - PtyChkDisable - parity check is disabled
* 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
*/
-#define PDC_RX_CTL 0x000C0E01
+#define PDC_RX_CTL 0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE 0x1
#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
@@ -252,11 +260,29 @@ struct pdc_ring_alloc {
u32 size; /* ring allocation size in bytes */
};
+/*
+ * context associated with a receive descriptor.
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ * rx ring index.
+ * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
+ * index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ * at a descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries,
+ * then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+ void *rxp_ctx;
+ struct scatterlist *dst_sg;
+ u32 rxin_numd;
+ void *resp_hdr;
+ dma_addr_t resp_hdr_daddr;
+};
+
/* PDC state structure */
struct pdc_state {
- /* synchronize access to this PDC state structure */
- spinlock_t pdc_lock;
-
/* Index of the PDC whose state is in this structure instance */
u8 pdc_idx;
@@ -272,13 +298,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /*
- * Last interrupt status read from PDC device. Saved in interrupt
- * handler so the handler can clear the interrupt in the device,
- * and the interrupt thread called later can know which interrupt
- * bits are active.
- */
- unsigned long intstatus;
+ /* tasklet for deferred processing after DMA rx interrupt */
+ struct tasklet_struct rx_tasklet;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -369,11 +390,7 @@ struct pdc_state {
/* Index of next rx descriptor to post. */
u32 rxout;
- /*
- * opaque context associated with frame that starts at each
- * rx ring index.
- */
- void *rxp_ctx[PDC_RING_ENTRIES];
+ struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
/*
* Scatterlists used to form request and reply frames beginning at a
@@ -381,27 +398,18 @@ struct pdc_state {
* is processed
*/
struct scatterlist *src_sg[PDC_RING_ENTRIES];
- struct scatterlist *dst_sg[PDC_RING_ENTRIES];
-
- /*
- * Number of rx descriptors associated with the message that starts
- * at this descriptor index. Not set for every index. For example,
- * if descriptor index i points to a scatterlist with 4 entries, then
- * the next three descriptor indexes don't have a value set.
- */
- u32 rxin_numd[PDC_RING_ENTRIES];
-
- void *resp_hdr[PDC_RING_ENTRIES];
- dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
struct dentry *debugfs_stats; /* debug FS stats file for this PDC */
/* counters */
- u32 pdc_requests; /* number of request messages submitted */
- u32 pdc_replies; /* number of reply messages received */
- u32 txnobuf; /* count of tx ring full */
- u32 rxnobuf; /* count of rx ring full */
- u32 rx_oflow; /* count of rx overflows */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 last_tx_not_done; /* too few tx descriptors to indicate done */
+ u32 tx_ring_full; /* unable to accept msg because tx ring full */
+ u32 rx_ring_full; /* unable to accept msg because rx ring full */
+ u32 txnobuf; /* unable to create tx descriptor */
+ u32 rxnobuf; /* unable to create rx descriptor */
+ u32 rx_oflow; /* count of rx overflows */
};
/* Global variables */
@@ -434,20 +442,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC requests............%u\n",
+ "PDC requests....................%u\n",
pdcs->pdc_requests);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "PDC responses...........%u\n",
+ "PDC responses...................%u\n",
pdcs->pdc_replies);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Tx err ring full........%u\n",
+ "Tx not done.....................%u\n",
+ pdcs->last_tx_not_done);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx ring full....................%u\n",
+ pdcs->tx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Rx ring full....................%u\n",
+ pdcs->rx_ring_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Rx err ring full........%u\n",
+ "Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "Receive overflow........%u\n",
+ "Receive overflow................%u\n",
pdcs->rx_oflow);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Num frags in rx ring............%u\n",
+ NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost));
if (out_offset > out_count)
out_offset = out_count;
@@ -480,17 +501,16 @@ static void pdc_setup_debugfs(struct pdc_state *pdcs)
if (!debugfs_dir)
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+ /* S_IRUSR == 0400 */
+ pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
debugfs_dir, pdcs,
&pdc_debugfs_stats);
}
static void pdc_free_debugfs(void)
{
- if (debugfs_dir && simple_empty(debugfs_dir)) {
- debugfs_remove_recursive(debugfs_dir);
- debugfs_dir = NULL;
- }
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
}
/**
@@ -505,17 +525,17 @@ pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
u32 buf_len, u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
dev_dbg(dev,
"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
- iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+ rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ rxd->ctrl1 = cpu_to_le32(flags);
+ rxd->ctrl2 = cpu_to_le32(buf_len);
+
/* bump ring index and return */
pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}
@@ -533,53 +553,50 @@ pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
u32 flags)
{
struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
dev_dbg(dev,
"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
pdcs->pdc_idx, pdcs->txout, buf_len, flags);
- iowrite32(lower_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrlow);
- iowrite32(upper_32_bits(dma_addr),
- (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
- iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
- iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+ txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ txd->ctrl1 = cpu_to_le32(flags);
+ txd->ctrl2 = cpu_to_le32(buf_len);
/* bump ring index and return */
pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
/**
- * pdc_receive() - Receive a response message from a given SPU.
+ * pdc_receive_one() - Receive a response message from a given SPU.
* @pdcs: PDC state for the SPU to receive from
- * @mssg: mailbox message to be returned to client
*
* When the return code indicates success, the response message is available in
* the receive buffers provided prior to submission of the request.
*
- * Input:
- * pdcs - PDC state structure for the SPU to be polled
- * mssg - mailbox message to be returned to client. This function sets the
- * context pointer on the message to help the client associate the
- * response with a request.
- *
* Return: PDC_SUCCESS if one or more receive descriptors was processed
* -EAGAIN indicates that no response message is available
* -EIO an error occurred
*/
static int
-pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+pdc_receive_one(struct pdc_state *pdcs)
{
struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ struct brcm_message mssg;
u32 len, rx_status;
u32 num_frags;
- int i;
u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
u32 frags_rdy; /* number of fragments ready to read */
u32 rx_idx; /* ring index of start of receive frame */
dma_addr_t resp_hdr_daddr;
+ struct pdc_rx_ctx *rx_ctx;
- spin_lock(&pdcs->pdc_lock);
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+ mssg.type = BRCM_MESSAGE_SPU;
/*
* return if a complete response message is not yet ready.
@@ -587,47 +604,34 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
* to read.
*/
frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
- if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* See if the hw has written more fragments than we know */
- pdcs->last_rx_curr =
- (ioread32((void *)&pdcs->rxregs_64->status0) &
- CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
- frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
- pdcs->nrxpost);
- if ((frags_rdy == 0) ||
- (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
- /* No response ready */
- spin_unlock(&pdcs->pdc_lock);
- return -EAGAIN;
- }
- /* can't read descriptors/data until write index is read */
- rmb();
- }
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+ /* No response ready */
+ return -EAGAIN;
num_frags = pdcs->txin_numd[pdcs->txin];
+ WARN_ON(num_frags == 0);
+
dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
- for (i = 0; i < num_frags; i++)
- pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+ pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
pdcs->pdc_idx, num_frags);
rx_idx = pdcs->rxin;
- num_frags = pdcs->rxin_numd[rx_idx];
+ rx_ctx = &pdcs->rx_ctx[rx_idx];
+ num_frags = rx_ctx->rxin_numd;
/* Return opaque context with result */
- mssg->ctx = pdcs->rxp_ctx[rx_idx];
- pdcs->rxp_ctx[rx_idx] = NULL;
- resp_hdr = pdcs->resp_hdr[rx_idx];
- resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
- dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
- sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
-
- for (i = 0; i < num_frags; i++)
- pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+ mssg.ctx = rx_ctx->rxp_ctx;
+ rx_ctx->rxp_ctx = NULL;
+ resp_hdr = rx_ctx->resp_hdr;
+ resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+ dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+ DMA_FROM_DEVICE);
- spin_unlock(&pdcs->pdc_lock);
+ pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
pdcs->pdc_idx, num_frags);
@@ -659,12 +663,35 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+ mbox_chan_received_data(chan, &mssg);
+
pdcs->pdc_replies++;
- /* if we read one or more rx descriptors, claim success */
- if (num_frags > 0)
- return PDC_SUCCESS;
- else
- return -EIO;
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called from the rx tasklet scheduled by the DMA receive interrupt.
+ * Return: 0
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+ int rx_status;
+
+ /* read last_rx_curr from register once */
+ pdcs->last_rx_curr =
+ (ioread32(&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+ do {
+ /* Could be many frames ready */
+ rx_status = pdc_receive_one(pdcs);
+ } while (rx_status == PDC_SUCCESS);
+
+ return 0;
}
/**
@@ -766,8 +793,8 @@ static int pdc_tx_list_final(struct pdc_state *pdcs)
* before chip starts to process new request
*/
wmb();
- iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
- iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+ iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
pdcs->pdc_requests++;
return PDC_SUCCESS;
@@ -796,6 +823,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
dma_addr_t daddr;
void *vaddr;
+ struct pdc_rx_ctx *rx_ctx;
rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
pdcs->nrxpost);
@@ -806,7 +834,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* allocate a buffer for the dma rx status */
vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
- if (!vaddr)
+ if (unlikely(!vaddr))
return -ENOMEM;
/*
@@ -819,15 +847,16 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
/* This is always the first descriptor in the receive sequence */
flags = D64_CTRL1_SOF;
- pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
flags |= D64_CTRL1_EOT;
- pdcs->rxp_ctx[pdcs->rxout] = ctx;
- pdcs->dst_sg[pdcs->rxout] = dst_sg;
- pdcs->resp_hdr[pdcs->rxout] = vaddr;
- pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+ rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+ rx_ctx->rxp_ctx = ctx;
+ rx_ctx->dst_sg = dst_sg;
+ rx_ctx->resp_hdr = vaddr;
+ rx_ctx->resp_hdr_daddr = daddr;
pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
return PDC_SUCCESS;
}
@@ -895,7 +924,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
desc_w++;
sg = sg_next(sg);
}
- pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
return PDC_SUCCESS;
}
@@ -903,7 +932,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
/**
* pdc_irq_handler() - Interrupt handler called in interrupt context.
* @irq: Interrupt number that has fired
- * @cookie: PDC state for DMA engine that generated the interrupt
+ * @data: device struct for DMA engine that generated the interrupt
*
* We have to clear the device interrupt status flags here. So cache the
* status for later use in the thread function. Other than that, just return
@@ -912,88 +941,39 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
* Return: IRQ_WAKE_THREAD if interrupt is ours
* IRQ_NONE otherwise
*/
-static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+static irqreturn_t pdc_irq_handler(int irq, void *data)
{
- struct pdc_state *pdcs = cookie;
+ struct device *dev = (struct device *)data;
+ struct pdc_state *pdcs = dev_get_drvdata(dev);
u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
- if (intstatus & PDC_XMTINTEN_0)
- set_bit(PDC_XMTINT_0, &pdcs->intstatus);
- if (intstatus & PDC_RCVINTEN_0)
- set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ if (unlikely(intstatus == 0))
+ return IRQ_NONE;
+
+ /* Disable interrupts until soft handler runs */
+ iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
/* Clear interrupt flags in device */
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
- return IRQ_WAKE_THREAD;
-
- return IRQ_NONE;
+ tasklet_schedule(&pdcs->rx_tasklet);
+ return IRQ_HANDLED;
}
/**
- * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
- * completed or data is available to receive.
- * @irq: Interrupt number
- * @cookie: PDC state for PDC that generated the interrupt
- *
- * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
- * as many SPU response messages as are available and send each to the mailbox
- * client.
- *
- * Return: IRQ_HANDLED if we recognized and handled the interrupt
- * IRQ_NONE otherwise
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @data: PDC state structure
*/
-static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+static void pdc_tasklet_cb(unsigned long data)
{
- struct pdc_state *pdcs = cookie;
- struct mbox_controller *mbc;
- struct mbox_chan *chan;
- bool tx_int;
- bool rx_int;
- int rx_status;
- struct brcm_message mssg;
+ struct pdc_state *pdcs = (struct pdc_state *)data;
- tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
- rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+ pdc_receive(pdcs);
- if (pdcs && (tx_int || rx_int)) {
- dev_dbg(&pdcs->pdev->dev,
- "%s() got irq %d with tx_int %s, rx_int %s",
- __func__, irq,
- tx_int ? "set" : "clear", rx_int ? "set" : "clear");
-
- mbc = &pdcs->mbc;
- chan = &mbc->chans[0];
-
- if (tx_int) {
- dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
- /* only one frame in flight at a time */
- mbox_chan_txdone(chan, PDC_SUCCESS);
- }
- if (rx_int) {
- while (1) {
- /* Could be many frames ready */
- memset(&mssg, 0, sizeof(mssg));
- mssg.type = BRCM_MESSAGE_SPU;
- rx_status = pdc_receive(pdcs, &mssg);
- if (rx_status >= 0) {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): invoking client rx cb",
- __func__);
- mbox_chan_received_data(chan, &mssg);
- } else {
- dev_dbg(&pdcs->pdev->dev,
- "%s(): no SPU response available",
- __func__);
- break;
- }
- }
- }
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ /* reenable interrupts */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
/**
@@ -1016,14 +996,14 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Allocate tx ring */
tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
- if (!tx.vbase) {
+ if (unlikely(!tx.vbase)) {
err = -ENOMEM;
goto done;
}
/* Allocate rx ring */
rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
- if (!rx.vbase) {
+ if (unlikely(!rx.vbase)) {
err = -ENOMEM;
goto fail_dealloc;
}
@@ -1033,9 +1013,6 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
- /* lock after ring allocation to avoid scheduling while atomic */
- spin_lock(&pdcs->pdc_lock);
-
memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
@@ -1053,40 +1030,52 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
/* Tell device the base DMA address of each ring */
dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* But first disable DMA and set curptr to 0 for both TX & RX */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ /* Set base DMA addresses */
iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrlow);
+ &dma_reg->dmaxmt.addrlow);
iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
- (void *)&dma_reg->dmaxmt.addrhigh);
+ &dma_reg->dmaxmt.addrhigh);
iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrlow);
+ &dma_reg->dmarcv.addrlow);
iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
- (void *)&dma_reg->dmarcv.addrhigh);
+ &dma_reg->dmarcv.addrhigh);
+
+ /* Re-enable DMA */
+ iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
/* Initialize descriptors */
for (i = 0; i < PDC_RING_ENTRIES; i++) {
/* Every tx descriptor can be used for start of frame. */
if (i != pdcs->ntxpost) {
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
- (void *)&pdcs->txd_64[i].ctrl1);
+ &pdcs->txd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
- D64_CTRL1_EOT,
- (void *)&pdcs->txd_64[i].ctrl1);
+ D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
}
/* Every rx descriptor can be used for start of frame */
if (i != pdcs->nrxpost) {
iowrite32(D64_CTRL1_SOF,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
} else {
/* Last descriptor in ringset. Set End of Table. */
iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
- (void *)&pdcs->rxd_64[i].ctrl1);
+ &pdcs->rxd_64[i].ctrl1);
}
}
- spin_unlock(&pdcs->pdc_lock);
return PDC_SUCCESS;
fail_dealloc:
@@ -1111,6 +1100,80 @@ static void pdc_ring_free(struct pdc_state *pdcs)
}
/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg: Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+ u32 cnt = 0;
+
+ while (sg) {
+ cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+ sg = sg_next(sg);
+ }
+ return cnt;
+}
+
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs: PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ * false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+ u32 rx_avail;
+ u32 tx_avail;
+ bool full = false;
+
+ /* Check if the tx and rx rings are likely to have enough space */
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_cnt > rx_avail)) {
+ pdcs->rx_ring_full++;
+ full = true;
+ }
+
+ if (likely(!full)) {
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(tx_cnt > tx_avail)) {
+ pdcs->tx_ring_full++;
+ full = true;
+ }
+ }
+ return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan: mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ bool ret;
+
+ if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+ PDC_RING_SPACE_MIN))) {
+ pdcs->last_tx_not_done++;
+ ret = false;
+ } else {
+ ret = true;
+ }
+ return ret;
+}
+
+/**
* pdc_send_data() - mailbox send_data function
* @chan: The mailbox channel on which the data is sent. The channel
* corresponds to a DMA ringset.
@@ -1141,29 +1204,43 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
int src_nent;
int dst_nent;
int nent;
+ u32 tx_desc_req;
+ u32 rx_desc_req;
- if (mssg->type != BRCM_MESSAGE_SPU)
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
return -ENOTSUPP;
src_nent = sg_nents(mssg->spu.src);
- if (src_nent) {
+ if (likely(src_nent)) {
nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
- if (nent == 0)
+ if (unlikely(nent == 0))
return -EIO;
}
dst_nent = sg_nents(mssg->spu.dst);
- if (dst_nent) {
+ if (likely(dst_nent)) {
nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
DMA_FROM_DEVICE);
- if (nent == 0) {
+ if (unlikely(nent == 0)) {
dma_unmap_sg(dev, mssg->spu.src, src_nent,
DMA_TO_DEVICE);
return -EIO;
}
}
- spin_lock(&pdcs->pdc_lock);
+ /*
+ * Check if the tx and rx rings have enough space. Do this prior to
+ * writing any tx or rx descriptors. Need to ensure that we do not write
+ * a partial set of descriptors, or write just rx descriptors but
+ * corresponding tx descriptors don't fit. Note that we want this check
+ * and the entire sequence of descriptor writes to happen without another
+ * thread getting in. The channel spin lock in the mailbox framework
+ * ensures this.
+ */
+ tx_desc_req = pdc_desc_count(mssg->spu.src);
+ rx_desc_req = pdc_desc_count(mssg->spu.dst);
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+ return -ENOSPC;
/* Create rx descriptors to SPU catch response */
err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
@@ -1173,9 +1250,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
err |= pdc_tx_list_final(pdcs); /* initiate transfer */
- spin_unlock(&pdcs->pdc_lock);
-
- if (err)
+ if (unlikely(err))
dev_err(&pdcs->pdev->dev,
"%s failed with error %d", __func__, err);
@@ -1224,26 +1299,29 @@ void pdc_hw_init(struct pdc_state *pdcs)
/* initialize data structures */
pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
pdcs->txregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->rxregs_64 = (struct dma64_regs *)
- (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ (((u8 *)pdcs->pdc_reg_vbase) +
PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
pdcs->ntxd = PDC_RING_ENTRIES;
pdcs->nrxd = PDC_RING_ENTRIES;
pdcs->ntxpost = PDC_RING_ENTRIES - 1;
pdcs->nrxpost = PDC_RING_ENTRIES - 1;
- pdcs->regs->intmask = 0;
+ iowrite32(0, &pdcs->regs->intmask);
dma_reg = &pdcs->regs->dmaregs[ringset];
- iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
- iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
- iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+ /* Configure DMA but will enable later in pdc_ring_init() */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
- (void *)&dma_reg->dmarcv.control);
+ &dma_reg->dmarcv.control);
+
+ /* Reset current index pointers after making sure DMA is disabled */
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
iowrite32(PDC_CKSUM_CTRL,
@@ -1251,6 +1329,21 @@ void pdc_hw_init(struct pdc_state *pdcs)
}
/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+ struct dma64 *dma_reg;
+
+ dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+}
+
+/**
* pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
* header returned with each response message.
* @pdcs: PDC state structure
@@ -1301,8 +1394,6 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
struct device_node *dn = pdev->dev.of_node;
int err;
- pdcs->intstatus = 0;
-
/* interrupt configuration */
iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
@@ -1311,11 +1402,11 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
dev_name(dev), pdcs->pdc_irq, pdcs);
- err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
- pdc_irq_handler,
- pdc_irq_thread, 0, dev_name(dev), pdcs);
+
+ err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+ dev_name(dev), dev);
if (err) {
- dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+ dev_err(dev, "IRQ %u request failed with err %d\n",
pdcs->pdc_irq, err);
return err;
}
@@ -1324,6 +1415,7 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
static const struct mbox_chan_ops pdc_mbox_chan_ops = {
.send_data = pdc_send_data,
+ .last_tx_done = pdc_last_tx_done,
.startup = pdc_startup,
.shutdown = pdc_shutdown
};
@@ -1356,8 +1448,9 @@ static int pdc_mb_init(struct pdc_state *pdcs)
if (!mbc->chans)
return -ENOMEM;
- mbc->txdone_irq = true;
- mbc->txdone_poll = false;
+ mbc->txdone_irq = false;
+ mbc->txdone_poll = true;
+ mbc->txpoll_period = 1;
for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
mbc->chans[chan_index].con_priv = pdcs;
@@ -1427,7 +1520,6 @@ static int pdc_probe(struct platform_device *pdev)
goto cleanup;
}
- spin_lock_init(&pdcs->pdc_lock);
pdcs->pdev = pdev;
platform_set_drvdata(pdev, pdcs);
pdcs->pdc_idx = pdcg.num_spu;
@@ -1473,6 +1565,9 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
+ /* Init tasklet for deferred DMA rx processing */
+ tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+
err = pdc_interrupts_init(pdcs);
if (err)
goto cleanup_buf_pool;
@@ -1489,6 +1584,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
+ tasklet_kill(&pdcs->rx_tasklet);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1504,6 +1600,10 @@ static int pdc_remove(struct platform_device *pdev)
pdc_free_debugfs();
+ tasklet_kill(&pdcs->rx_tasklet);
+
+ pdc_hw_disable(pdcs);
+
mbox_controller_unregister(&pdcs->mbc);
dma_pool_destroy(pdcs->rx_buf_pool);
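
The ring-space checks added to this driver (pdc_rings_full(), pdc_last_tx_done()) all reduce to modular index arithmetic on power-of-two descriptor rings. Below is a minimal user-space sketch of that arithmetic, with illustrative constants and helper names rather than the driver's own macros; it mirrors what the driver's NTXDACTIVE()/NRXDACTIVE() style checks appear to compute with ntxpost/nrxpost set to PDC_RING_ENTRIES - 1.

/*
 * Standalone sketch, not part of the patch.  With a power-of-two ring,
 * (out - in) & (size - 1) counts occupied descriptors even across index
 * wrap-around; free space is the mask minus that count.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 512u		/* must be a power of two */
#define RING_MASK (RING_ENTRIES - 1)	/* plays the role of ntxpost/nrxpost */
#define SPACE_MIN 15u			/* free slots required to accept a msg */

static uint32_t ring_active(uint32_t in, uint32_t out)
{
	return (out - in) & RING_MASK;	/* posted but not yet reclaimed */
}

static bool ring_has_room(uint32_t in, uint32_t out, uint32_t want)
{
	return want <= RING_MASK - ring_active(in, out);
}

int main(void)
{
	/* 510 descriptors outstanding near the wrap point: only 1 slot free. */
	uint32_t in = 508, out = (508 + 510) & RING_MASK;

	printf("active=%u room_for_min=%d\n",
	       ring_active(in, out), ring_has_room(in, out, SPACE_MIN));
	return 0;
}
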
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index a334db5c9f1c..41bcd339b68a 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -403,6 +403,7 @@ static const struct of_device_id sti_mailbox_match[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
static int sti_mbox_probe(struct platform_device *pdev)
{
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 9ca96e9db6bf..9c79f8019d2a 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -11,12 +11,14 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -39,6 +41,8 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ wait_queue_head_t waitq;
+ struct fasync_struct *async_queue;
};
static ssize_t mbox_test_signal_write(struct file *filp,
@@ -81,6 +85,13 @@ static const struct file_operations mbox_test_signal_ops = {
.llseek = generic_file_llseek,
};
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
static ssize_t mbox_test_message_write(struct file *filp,
const char __user *userbuf,
size_t count, loff_t *ppos)
@@ -138,6 +149,20 @@ out:
return ret < 0 ? ret : count;
}
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+ unsigned char data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ data = tdev->rx_buffer[0];
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ if (data != '\0')
+ return true;
+ return false;
+}
+
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -147,6 +172,8 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
int l = 0;
int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
if (!touser)
return -ENOMEM;
@@ -155,15 +182,29 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
ret = simple_read_from_buffer(userbuf, count, ppos,
touser, ret);
- goto out;
+ goto kfree_err;
}
- if (tdev->rx_buffer[0] == '\0') {
- ret = snprintf(touser, 9, "<EMPTY>\n");
- ret = simple_read_from_buffer(userbuf, count, ppos,
- touser, ret);
- goto out;
- }
+ add_wait_queue(&tdev->waitq, &wait);
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (mbox_test_message_data_ready(tdev))
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto waitq_err;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto waitq_err;
+ }
+ schedule();
+
+ } while (1);
spin_lock_irqsave(&tdev->lock, flags);
@@ -185,14 +226,31 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
spin_unlock_irqrestore(&tdev->lock, flags);
ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
-out:
+waitq_err:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
kfree(touser);
return ret;
}
+static unsigned int
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ poll_wait(filp, &tdev->waitq, wait);
+
+ if (mbox_test_message_data_ready(tdev))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
static const struct file_operations mbox_test_message_ops = {
.write = mbox_test_message_write,
.read = mbox_test_message_read,
+ .fasync = mbox_test_message_fasync,
+ .poll = mbox_test_message_poll,
.open = simple_open,
.llseek = generic_file_llseek,
};
@@ -234,6 +292,10 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
spin_unlock_irqrestore(&tdev->lock, flags);
+
+ wake_up_interruptible(&tdev->waitq);
+
+ kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
}
static void mbox_test_prepare_message(struct mbox_client *client, void *message)
@@ -290,6 +352,7 @@ static int mbox_test_probe(struct platform_device *pdev)
{
struct mbox_test_device *tdev;
struct resource *res;
+ resource_size_t size;
int ret;
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -298,14 +361,21 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ size = resource_size(res);
tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->tx_mmio))
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+ /* if reserved area in SRAM, try just ioremap */
+ tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->tx_mmio))
tdev->tx_mmio = NULL;
/* If specified, second reg entry is Rx MMIO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ size = resource_size(res);
tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->rx_mmio))
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+ tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ else if (IS_ERR(tdev->rx_mmio))
tdev->rx_mmio = tdev->tx_mmio;
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
@@ -334,6 +404,7 @@ static int mbox_test_probe(struct platform_device *pdev)
if (ret)
return ret;
+ init_waitqueue_head(&tdev->waitq);
dev_info(&pdev->dev, "Successfully registered\n");
return 0;
@@ -357,6 +428,7 @@ static const struct of_device_id mbox_test_match[] = {
{ .compatible = "mailbox-test" },
{},
};
+MODULE_DEVICE_TABLE(of, mbox_test_match);
static struct platform_driver mbox_test_driver = {
.driver = {
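
With the poll()/fasync support added above, user space can wait for a reply on the test client's message file instead of repeatedly reading "<EMPTY>". A small user-space sketch follows; the debugfs path is an assumption (it depends on where debugfs is mounted and on the device name), so adjust it to match your system.

/* Sketch: block in poll(2) until the remote end delivers a message. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/kernel/debug/mailbox-test/message", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	if (poll(&pfd, 1, -1) < 0) {	/* sleep until data is ready */
		perror("poll");
		return 1;
	}

	n = read(pfd.fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);	/* hexdump of the received message */
	}

	close(pfd.fd);
	return 0;
}
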
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4a36632c236f..4671f8a12872 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -87,8 +87,7 @@ exit:
if (!err && (chan->txdone_method & TXDONE_BY_POLL))
/* kick start the timer immediately to avoid delays */
- hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
- HRTIMER_MODE_REL);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
static void tx_tick(struct mbox_chan *chan, int r)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 1f32688c312d..dd9ecd354a3e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -447,7 +447,6 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
- acpi_size pcct_tbl_header_size;
struct acpi_table_header *pcct_tbl;
struct acpi_subtable_header *pcct_entry;
struct acpi_table_pcct *acpi_pcct_tbl;
@@ -456,9 +455,7 @@ static int __init acpi_pcc_probe(void)
acpi_status status = AE_OK;
/* Search for PCCT */
- status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
- &pcct_tbl,
- &pcct_tbl_header_size);
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
if (ACPI_FAILURE(status) || !pcct_tbl) {
pr_warn("PCCT header not found.\n");
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000000..0cde356c11ab
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+};
+
+struct tegra_hsp {
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox;
+ void __iomem *regs;
+ unsigned int irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+ spinlock_t lock;
+
+ struct list_head doorbells;
+};
+
+static inline struct tegra_hsp *
+to_tegra_hsp(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct tegra_hsp, mbox);
+}
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ /*
+ * Depending on the bootloader chain, the CCPLEX doorbell will
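+ * have some doorbells already enabled, which means that the
+ * interrupt can fire as soon as it is requested.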
+ *
+ * In that case, db->channel.chan will still be NULL here and
+ * cause a crash if not properly guarded.
+ *
+ * It remains to be seen if ignoring the doorbell in that case
+ * is the correct solution.
+ */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) << 16;
+ offset += index * 0x100;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = kstrdup_const(name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static void __tegra_hsp_doorbell_destroy(struct tegra_hsp_doorbell *db)
+{
+ list_del(&db->list);
+ kfree_const(db->name);
+ kfree(db);
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= hsp->mbox.num_chans) {
+ dev_err(hsp->mbox.dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ if (!tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_doorbell_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static struct mbox_chan *of_tegra_hsp_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp *hsp = to_tegra_hsp(mbox);
+ unsigned int type = args->args[0];
+ unsigned int master = args->args[1];
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ switch (type) {
+ case TEGRA_HSP_MBOX_TYPE_DB:
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ break;
+
+ default:
+ break;
+ }
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < hsp->mbox.num_chans; i++) {
+ chan = &hsp->mbox.chans[i];
+ if (!chan->con_priv) {
+ chan->con_priv = channel;
+ channel->chan = chan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static void tegra_hsp_remove_doorbells(struct tegra_hsp *hsp)
+{
+ struct tegra_hsp_doorbell *db, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ list_for_each_entry_safe(db, tmp, &hsp->doorbells, list)
+ __tegra_hsp_doorbell_destroy(db);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel)) {
+ tegra_hsp_remove_doorbells(hsp);
+ return PTR_ERR(channel);
+ }
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ struct resource *res;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname(pdev, "doorbell");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get doorbell IRQ: %d\n", err);
+ return err;
+ }
+
+ hsp->irq = err;
+
+ hsp->mbox.of_xlate = of_tegra_hsp_xlate;
+ hsp->mbox.num_chans = 32;
+ hsp->mbox.dev = &pdev->dev;
+ hsp->mbox.txdone_irq = false;
+ hsp->mbox.txdone_poll = false;
+ hsp->mbox.ops = &tegra_hsp_doorbell_ops;
+
+ hsp->mbox.chans = devm_kcalloc(&pdev->dev, hsp->mbox.num_chans,
+ sizeof(*hsp->mbox.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox.chans)
+ return -ENOMEM;
+
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ err = mbox_controller_register(&hsp->mbox);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mailbox: %d\n", err);
+ tegra_hsp_remove_doorbells(hsp);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, hsp->irq, tegra_hsp_doorbell_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hsp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&hsp->mbox);
+ tegra_hsp_remove_doorbells(hsp);
+
+ return 0;
+}
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ },
+ .probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
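
For context on how these doorbell channels would be consumed, here is a hedged sketch of a mailbox client written against the generic mailbox client API. The client's "mboxes" device-tree specifier, the demo_* names and the callbacks are assumptions for illustration only; they are not part of this driver.

/*
 * The client's DT node is assumed to carry an "mboxes" phandle such as
 * <&hsp_top TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>, which
 * of_tegra_hsp_xlate() above resolves to a channel.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Doorbells carry no payload, so the framework passes data == NULL. */
static void demo_rx_callback(struct mbox_client *cl, void *data)
{
	dev_info(cl->dev, "doorbell rang\n");
}

static int demo_client_init(struct device *dev, struct demo_client *dc)
{
	int ret;

	dc->cl.dev = dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_block = false;	/* ringing a doorbell completes at once */

	/* Resolves the first "mboxes" entry via the controller's of_xlate. */
	dc->chan = mbox_request_channel(&dc->cl, 0);
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	/* Ring the remote processor; release with mbox_free_channel() later. */
	ret = mbox_send_message(dc->chan, NULL);
	return ret < 0 ? ret : 0;
}
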
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b420a55c745..c3ea03c9a1a8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
- unsigned invalidate_needs_gc:1;
+ unsigned invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -593,8 +593,8 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
+ wait_queue_head_t gc_wait;
- wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 6fdd8e252760..a43eedd5804d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
bch_moving_gc(c);
}
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
{
- struct cache_set *c = arg;
struct cache *ca;
unsigned i;
- while (1) {
-again:
- bch_btree_gc(c);
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc)
+ return true;
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
- break;
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
- mutex_lock(&c->bucket_lock);
+ return false;
+}
- for_each_cache(ca, c, i)
- if (ca->invalidate_needs_gc) {
- mutex_unlock(&c->bucket_lock);
- set_current_state(TASK_RUNNING);
- goto again;
- }
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
- mutex_unlock(&c->bucket_lock);
+ while (1) {
+ wait_event_interruptible(c->gc_wait,
+ kthread_should_stop() || gc_should_run(c));
- schedule();
+ if (kthread_should_stop())
+ break;
+
+ set_gc_sectors(c);
+ bch_btree_gc(c);
}
return 0;
@@ -1790,11 +1792,10 @@ again:
int bch_gc_thread_start(struct cache_set *c)
{
- c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread))
return PTR_ERR(c->gc_thread);
- set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
return 0;
}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5c391fa01bed..9b80417cd547 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
- if (c->gc_thread)
- wake_up_process(c->gc_thread);
+ wake_up(&c->gc_wait);
}
#define MAP_DONE 0
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f49c5417527d..76d20875503c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
- set_gc_sectors(op->c);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
- }
if (op->bypass)
return bch_data_invalidate(cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2fb5bfeb43e2..3a19cbc8b230 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS 16 /* partition support */
/* Superblock */
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (minor < 0)
return minor;
+ minor *= BCACHE_MINORS;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->disk = alloc_disk(1))) {
+ !(d->disk = alloc_disk(BCACHE_MINORS))) {
ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
}
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
+ set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
LIST_HEAD(journal);
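
Taken together, the bcache changes in bcache.h, btree.c, btree.h, request.c and super.c replace the manual set_current_state()/schedule() loop with a waitqueue guarded by a predicate: the GC thread sleeps on gc_wait until gc_should_run() or a stop request, and producers simply wake the queue after updating the state the predicate reads. A generic sketch of that pattern, with illustrative names rather than bcache's own types:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct demo_gc {
	wait_queue_head_t gc_wait;
	atomic_t work_pending;
	struct task_struct *thread;
};

static bool demo_should_run(struct demo_gc *g)
{
	return atomic_read(&g->work_pending) > 0;
}

static int demo_gc_thread(void *arg)
{
	struct demo_gc *g = arg;

	while (1) {
		/* Sleep until there is work or we are asked to stop. */
		wait_event_interruptible(g->gc_wait,
				kthread_should_stop() || demo_should_run(g));
		if (kthread_should_stop())
			break;

		atomic_set(&g->work_pending, 0);
		/* ... do one garbage-collection pass here ... */
	}
	return 0;
}

static int demo_gc_start(struct demo_gc *g)
{
	init_waitqueue_head(&g->gc_wait);
	atomic_set(&g->work_pending, 0);

	g->thread = kthread_run(demo_gc_thread, g, "demo_gc");
	return PTR_ERR_OR_ZERO(g->thread);
}

/* Producer side: record the work, then wake the thread. */
static void demo_kick_gc(struct demo_gc *g)
{
	atomic_inc(&g->work_pending);
	wake_up(&g->gc_wait);
}
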
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index bed4ad4e1b7c..389c9e8ac785 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -17,9 +17,9 @@
* discard bitset.
*/
-typedef dm_block_t __bitwise__ dm_oblock_t;
-typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;
+typedef dm_block_t __bitwise dm_oblock_t;
+typedef uint32_t __bitwise dm_cblock_t;
+typedef dm_block_t __bitwise dm_dblock_t;
static inline dm_oblock_t to_oblock(dm_block_t b)
{
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c72a77048b73..a5a9b17f0f7f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -17,7 +17,7 @@
#include <linux/hdreg.h>
#include <linux/compat.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e38936d05df1..2a514036a83d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
MD_CLOSING, /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
+
+/* clear unsupported mddev_flags */
+static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
+ unsigned long unsupported_flags)
+{
+ mddev->flags &= ~unsupported_flags;
+}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a162fedeb51a..848365d474f3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,6 +26,11 @@
#include "raid0.h"
#include "raid5.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+ (1L << MD_FAILFAST_SUPPORTED))
+
static int raid0_congested(struct mddev *mddev, int bits)
{
struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
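
The raid0 hunks above, together with the md.h helper, replace per-flag clear_bit() calls with one per-personality mask: each array type defines the UNSUPPORTED_MDDEV_FLAGS it cannot honour and clears them all in one step during takeover, so a newly added flag only has to be added to the masks. A standalone sketch of the masking, with the flag bit numbers made up for illustration:

#include <stdio.h>

/* Illustrative bit numbers only; the real values come from enum mddev_flags. */
enum { MD_HAS_JOURNAL = 3, MD_JOURNAL_CLEAN = 4, MD_FAILFAST_SUPPORTED = 5 };

#define UNSUPPORTED_MDDEV_FLAGS \
	((1L << MD_HAS_JOURNAL) | \
	 (1L << MD_JOURNAL_CLEAN) | \
	 (1L << MD_FAILFAST_SUPPORTED))

struct mddev { unsigned long flags; };

static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
						 unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

int main(void)
{
	struct mddev md = { .flags = (1L << MD_HAS_JOURNAL) | 0x1 };

	mddev_clear_unsupported_flags(&md, UNSUPPORTED_MDDEV_FLAGS);
	printf("flags after raid0 takeover: %#lx\n", md.flags); /* journal bit gone, 0x1 kept */
	return 0;
}
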
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a1f3fbed9100..7b0f647bcccb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -42,6 +42,10 @@
#include "raid1.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN))
+
/*
* Number of guaranteed r1bios in case of extreme VM load:
*/
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
- struct r1bio *r1_bio;
struct bio *read_bio;
+ struct bitmap *bitmap = mddev->bitmap;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ int rdisk;
+
+ wait_barrier(conf, bio);
+
+read_again:
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+ if (rdisk < 0) {
+ /* couldn't find anywhere to read from */
+ raid_end_bio_io(r1_bio);
+ return;
+ }
+ mirror = conf->mirrors + rdisk;
+
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /*
+ * Reading from a write-mostly device must take care not to
+ * over-take any writes that are 'behind'
+ */
+ raid1_log(mddev, "wait behind writes");
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ r1_bio->read_disk = rdisk;
+ r1_bio->start_next_window = 0;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r1_bio->bios[rdisk] = read_bio;
+
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
+ read_bio->bi_bdev = mirror->rdev->bdev;
+ read_bio->bi_end_io = raid1_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r1_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
+ if (max_sectors < r1_bio->sectors) {
+ /*
+ * could not read all from this device, so we will need another
+ * r1_bio.
+ */
+ sectors_handled = (r1_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __make_request and subsequent mempool_alloc might
+ * block waiting for it. So hand bio over to raid1d.
+ */
+ reschedule_retry(r1_bio);
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
+{
+ struct r1conf *conf = mddev->private;
int i, disks;
- struct bitmap *bitmap;
+ struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
- if (bio_data_dir(bio) == WRITE &&
- ((bio_end_sector(bio) > mddev->suspend_lo &&
+ if ((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
+ bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+ /*
+ * As the suspend_* range is controlled by userspace, we want
+ * an interruptible wait.
*/
DEFINE_WAIT(w);
for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
!md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio))))
break;
schedule();
}
finish_wait(&conf->wait_barrier, &w);
}
-
start_next_window = wait_barrier(conf, bio);
- bitmap = mddev->bitmap;
-
- /*
- * make_request() can abort the operation when read-ahead is being
- * used and no empty request is available.
- *
- */
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio);
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r1_bio and no locking
- * will be needed when requests complete. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- int rdisk;
-
-read_again:
- rdisk = read_balance(conf, r1_bio, &max_sectors);
-
- if (rdisk < 0) {
- /* couldn't find anywhere to read from */
- raid_end_bio_io(r1_bio);
- return;
- }
- mirror = conf->mirrors + rdisk;
-
- if (test_bit(WriteMostly, &mirror->rdev->flags) &&
- bitmap) {
- /* Reading from a write-mostly device must
- * take care not to over-take any writes
- * that are 'behind'
- */
- raid1_log(mddev, "wait behind writes");
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
- r1_bio->read_disk = rdisk;
- r1_bio->start_next_window = 0;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r1_bio->bios[rdisk] = read_bio;
-
- read_bio->bi_iter.bi_sector = r1_bio->sector +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
- read_bio->bi_end_io = raid1_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &mirror->rdev->flags) &&
- test_bit(R1BIO_FailFast, &r1_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r1_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
-
- if (max_sectors < r1_bio->sectors) {
- /* could not read all from this device, so we will
- * need another r1_bio.
- */
-
- sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __make_request
- * and subsequent mempool_alloc might block waiting
- * for it. So hand bio over to raid1d.
- */
- reschedule_retry(r1_bio);
-
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio) - sectors_handled;
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, r1_bio->sector,
- max_sectors,
+ is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
wake_up(&conf->wait_barrier);
}
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ struct r1bio *r1_bio;
+
+ /*
+ * make_request() can abort the operation when read-ahead is being
+ * used and no empty request is available.
+ *
+ */
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio);
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r1_bio and
+ * no locking will be needed when requests complete. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid1_read_request(mddev, bio, r1_bio);
+ else
+ raid1_write_request(mddev, bio, r1_bio);
+}
+
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
}
return conf;
}
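
One detail worth noting in raid1_read_request() above: when read_balance() can only serve max_sectors of the request (for example because of a bad block ahead), the remainder is retried with a fresh r1_bio and bio->bi_phys_segments counts the outstanding pieces (0 means a single r1_bio; otherwise it is the number of not-yet-completed requests). A user-space sketch of that accounting, with arbitrary sector numbers:

#include <stdio.h>

int main(void)
{
	unsigned int sector = 0, remaining = 100;  /* master bio: 100 sectors            */
	unsigned int max_sectors = 32;             /* per-attempt limit, e.g. bad block  */
	unsigned int phys_segments = 0;            /* models bio->bi_phys_segments       */

	while (remaining) {
		unsigned int handled = remaining < max_sectors ? remaining : max_sectors;

		if (handled < remaining)           /* another r1_bio will be needed */
			phys_segments = phys_segments ? phys_segments + 1 : 2;

		printf("r1_bio: sector %u, %u sectors, outstanding=%u\n",
		       sector, handled, phys_segments);
		sector += handled;
		remaining -= handled;
	}
	return 0;
}
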
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab5e86209322..1920756828df 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- struct r10bio *r10_bio;
struct bio *read_bio;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ sector_t sectors;
+ struct md_rdev *rdev;
+ int slot;
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ sectors = bio_sectors(bio);
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
+ */
+ raid10_log(conf->mddev, "wait reshape");
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
+ wait_barrier(conf);
+ }
+
+read_again:
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
+ raid_end_bio_io(r10_bio);
+ return;
+ }
+ slot = r10_bio->read_slot;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
+
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+ choose_data_offset(r10_bio, rdev);
+ read_bio->bi_bdev = rdev->bdev;
+ read_bio->bi_end_io = raid10_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r10_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
+ if (max_sectors < r10_bio->sectors) {
+ /*
+ * Could not read all from this device, so we will need another
+ * r10_bio.
+ */
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __generic_make_request and subsequent
+ * mempool_alloc might block waiting for it. So hand the bio over
+ * to raid10d.
+ */
+ reschedule_retry(r10_bio);
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r10_bio->state = 0;
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+ return;
+}
+
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
+{
+ struct r10conf *conf = mddev->private;
int i;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL;
+ sector_t sectors;
int sectors_handled;
int max_sectors;
- int sectors;
md_write_start(mddev, bio);
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /* IO spans the reshape position. Need to wait for
- * reshape to pass
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
*/
raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
sectors);
wait_barrier(conf);
}
+
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
conf->reshape_safe = mddev->reshape_position;
}
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = sectors;
-
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector;
- r10_bio->state = 0;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r10_bio and no locking
- * will be needed when the request completes. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- struct md_rdev *rdev;
- int slot;
-
-read_again:
- rdev = read_balance(conf, r10_bio, &max_sectors);
- if (!rdev) {
- raid_end_bio_io(r10_bio);
- return;
- }
- slot = r10_bio->read_slot;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r10_bio->devs[slot].bio = read_bio;
- r10_bio->devs[slot].rdev = rdev;
-
- read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
- choose_data_offset(r10_bio, rdev);
- read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_end_io = raid10_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &rdev->flags) &&
- test_bit(R10BIO_FailFast, &r10_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r10_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
- if (max_sectors < r10_bio->sectors) {
- /* Could not read all from this device, so we will
- * need another r10_bio.
- */
- sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r10_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __generic_make_request
- * and subsequent mempool_alloc might block
- * waiting for it. so hand bio over to raid10d.
- */
- reschedule_retry(r10_bio);
-
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = bio_sectors(bio) - sectors_handled;
- r10_bio->state = 0;
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, dev_sector,
- max_sectors,
+ is_bad = is_badblock(rdev, dev_sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
r10_bio->devs[i].bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
r10_bio->devs[i].repl_bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio, rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
one_write_done(r10_bio);
}
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ struct r10bio *r10_bio;
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio);
+
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector;
+ r10_bio->state = 0;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r10_bio and
+ * no locking will be needed when the request completes. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid10_read_request(mddev, bio, r10_bio);
+ else
+ raid10_write_request(mddev, bio, r10_bio);
+}
+
static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
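
The read path factored out above keeps the reshape guard: a bio that straddles conf->reshape_progress must wait until the reshape front has moved past it (or the bio lies entirely on one side of it). A tiny sketch of just the overlap test, mirroring the condition in raid10_read_request() with made-up sector values:

#include <stdbool.h>
#include <stdio.h>

static bool spans_reshape(unsigned long long bi_sector, unsigned long long sectors,
			  unsigned long long reshape_progress)
{
	/* Same shape as the check in raid10_read_request(). */
	return bi_sector < reshape_progress &&
	       bi_sector + sectors > reshape_progress;
}

int main(void)
{
	printf("%d\n", spans_reshape(90, 20, 100));   /* 1: straddles the front, must wait */
	printf("%d\n", spans_reshape(100, 20, 100));  /* 0: entirely ahead of the front    */
	printf("%d\n", spans_reshape(60, 20, 100));   /* 0: entirely behind the front      */
	return 0;
}
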
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d7bfb6fc8aef..0e8ed2c327b0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1682,8 +1682,7 @@ out:
static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect,
- sector_t log_start)
+ sector_t stripe_sect)
{
struct stripe_head *sh;
@@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
return NULL; /* no more stripe available */
r5l_recovery_reset_stripe(sh);
- sh->log_start = log_start;
return sh;
}
@@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ conf, stripe_sect);
}
if (!sh) {
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
@@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
conf->min_nr_stripes * 2);
raid5_set_cache_size(mddev,
conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf,
+ stripe_sect);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
r5l_recovery_replay_one_stripe(conf, sh, ctx);
- sh->log_start = ctx->pos;
list_move_tail(&sh->lru, cached_stripe_list);
}
r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
set_bit(R5_UPTODATE, &dev->flags);
}
}
- list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
- atomic_inc(&log->stripe_in_journal_count);
}
/*
@@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
struct stripe_head *sh, *next;
struct mddev *mddev = log->rdev->mddev;
struct page *page;
+ sector_t next_checkpoint = MaxSector;
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
return -ENOMEM;
}
+ WARN_ON(list_empty(&ctx->cached_list));
+
list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
struct r5l_meta_block *mb;
int i;
@@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE, REQ_FUA, false);
sh->log_start = ctx->pos;
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
ctx->pos = write_pos;
ctx->seq += 1;
-
+ next_checkpoint = sh->log_start;
list_del_init(&sh->lru);
raid5_release_stripe(sh);
}
+ log->next_checkpoint = next_checkpoint;
__free_page(page);
return 0;
}
@@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log)
struct r5l_recovery_ctx ctx;
int ret;
sector_t pos;
- struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
@@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log)
log->next_checkpoint = ctx.pos;
r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- } else {
- sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
- log->next_checkpoint = sh->log_start;
}
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else {
- pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx.data_only_stripes,
ctx.data_parity_stripes);
@@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
if (do_wakeup)
wake_up(&conf->wait_for_overlap);
- if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
- return;
-
spin_lock_irq(&conf->log->stripe_in_journal_lock);
list_del_init(&sh->r5c);
spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
+ rcu_assign_pointer(conf->log, log);
+
if (r5l_load_log(log))
goto error;
- rcu_assign_pointer(conf->log, log);
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
error:
+ rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_destroy(log->meta_pool);
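
The raid5-cache hunks move the journal bookkeeping for recovered data-only stripes into the rewrite loop: each stripe's log_start is set to the position it is actually rewritten at, it joins stripe_in_journal_list there, and the last rewritten position becomes log->next_checkpoint (staying at MaxSector if nothing was rewritten). A user-space sketch of that last-position tracking, with the per-stripe log footprint chosen arbitrarily:

#include <stdio.h>

#define MaxSector (~0ULL)

int main(void)
{
	unsigned long long pos = 1024;                  /* models ctx->pos                */
	unsigned long long next_checkpoint = MaxSector; /* default if nothing rewritten   */
	const unsigned long long stripe_footprint = 16; /* meta block + data, made up     */

	for (int stripe = 0; stripe < 3; stripe++) {
		unsigned long long log_start = pos;     /* sh->log_start = ctx->pos        */

		pos += stripe_footprint;                /* ctx->pos advances past the write */
		next_checkpoint = log_start;            /* last stripe's start wins        */
	}
	printf("log->next_checkpoint = %llu\n", next_checkpoint);
	return 0;
}
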
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 06d7279bdd04..36c13e4be9c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -62,6 +62,8 @@
#include "raid0.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
+
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
@@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_chunk_sectors = chunksect;
ret = setup_conf(mddev);
- if (!IS_ERR_VALUE(ret))
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ if (!IS_ERR(ret))
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
return ret;
}
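
The raid5 takeover fix swaps IS_ERR_VALUE() for IS_ERR(): setup_conf() returns a pointer encoded with the usual ERR_PTR() convention, and IS_ERR() is the check that matches it, whereas IS_ERR_VALUE() is meant for unsigned long return values. A small user-space model of that convention:

#include <stdio.h>

/* Minimal model of the kernel's ERR_PTR()/IS_ERR() pointer-error encoding. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *conf = ERR_PTR(-12);   /* e.g. setup_conf() failing with -ENOMEM */

	if (IS_ERR(conf))
		printf("takeover failed: %ld\n", PTR_ERR(conf));
	return 0;
}
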
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 7b8540291217..3512316e7a46 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,6 +80,22 @@ config MEDIA_RC_SUPPORT
Say Y when you have a TV or an IR device.
+config MEDIA_CEC_SUPPORT
+ bool "HDMI CEC support"
+ select MEDIA_CEC_EDID
+ ---help---
+ Enable support for HDMI CEC (Consumer Electronics Control),
+ which is an optional HDMI feature.
+
+ Say Y when you have an HDMI receiver, transmitter or a USB CEC
+ adapter that supports HDMI CEC.
+
+config MEDIA_CEC_DEBUG
+ bool "HDMI CEC debugfs interface"
+ depends on MEDIA_CEC_SUPPORT && DEBUG_FS
+ ---help---
+ Turns on the DebugFS interface for CEC devices.
+
config MEDIA_CEC_EDID
bool
@@ -99,7 +115,7 @@ config MEDIA_CONTROLLER
config MEDIA_CONTROLLER_DVB
bool "Enable Media controller for DVB (EXPERIMENTAL)"
- depends on MEDIA_CONTROLLER
+ depends on MEDIA_CONTROLLER && DVB_CORE
---help---
Enable the media controller API support for DVB.
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 0deaa93efdee..d87ccb8eeabe 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -6,6 +6,10 @@ ifeq ($(CONFIG_MEDIA_CEC_EDID),y)
obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
endif
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec/
+endif
+
media-objs := media-device.o media-devnode.o media-entity.o
#
diff --git a/drivers/staging/media/cec/Makefile b/drivers/media/cec/Makefile
index bd7f3c593468..d6686337275f 100644
--- a/drivers/staging/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,5 +1,5 @@
cec-objs := cec-core.o cec-adap.o cec-api.o
-ifeq ($(CONFIG_MEDIA_CEC),y)
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
endif
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 611e07b78bfe..0ea4efb3de66 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -587,7 +587,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_nack_cnt = 0;
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
- msg->flags = 0;
msg->sequence = ++adap->sequence;
if (!msg->sequence)
msg->sequence = ++adap->sequence;
@@ -596,6 +595,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
+ if (msg->timeout)
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
+ else
+ msg->flags = 0;
/* Sanity checks */
if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
@@ -763,23 +766,133 @@ EXPORT_SYMBOL_GPL(cec_transmit_msg);
static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
bool is_reply);
+#define DIRECTED 0x80
+#define BCAST1_4 0x40
+#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */
+#define BCAST (BCAST1_4 | BCAST2_0)
+#define BOTH (BCAST | DIRECTED)
+
+/*
+ * Specify minimum length and whether the message is directed, broadcast
+ * or both. Messages that do not match the criteria are ignored as per
+ * the CEC specification.
+ */
+static const u8 cec_msg_size[256] = {
+ [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
+ [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
+ [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
+ [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
+ [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
+ [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
+ [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
+ [CEC_MSG_STANDBY] = 2 | BOTH,
+ [CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
+ [CEC_MSG_RECORD_ON] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
+ [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
+ [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
+ [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
+ [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
+ [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
+ [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
+ [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
+ [CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
+ [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
+ [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
+ [CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_PLAY] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
+ [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
+ [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
+ [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
+ [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
+ [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
+ [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
+ [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
+ [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
+ [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
+ [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
+ [CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
+ [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
+ [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
+ [CEC_MSG_ABORT] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+ [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
+ [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
+ [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
+ [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
+ [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
+ [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
+ [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+ [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
+};
+
/* Called by the CEC adapter if a message is received */
void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
{
struct cec_data *data;
u8 msg_init = cec_msg_initiator(msg);
u8 msg_dest = cec_msg_destination(msg);
+ u8 cmd = msg->msg[1];
bool is_reply = false;
bool valid_la = true;
+ u8 min_len = 0;
if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
return;
+ /*
+ * Some CEC adapters will receive the messages that they transmitted.
+ * This test filters out those messages by checking if we are the
+ * initiator, and just returning in that case.
+ *
+ * Note that this won't work if this is an Unregistered device.
+ *
+ * It is bad practice for the hardware to receive the messages that it
+ * transmitted; luckily, most CEC adapters behave correctly in this
+ * respect.
+ */
+ if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
+ cec_has_log_addr(adap, msg_init))
+ return;
+
msg->rx_ts = ktime_get_ns();
msg->rx_status = CEC_RX_STATUS_OK;
msg->sequence = msg->reply = msg->timeout = 0;
msg->tx_status = 0;
msg->tx_ts = 0;
+ msg->tx_arb_lost_cnt = 0;
+ msg->tx_nack_cnt = 0;
+ msg->tx_low_drive_cnt = 0;
+ msg->tx_error_cnt = 0;
msg->flags = 0;
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
@@ -790,9 +903,71 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
if (!cec_msg_is_broadcast(msg))
valid_la = cec_has_log_addr(adap, msg_dest);
+ /*
+ * Check if the length is not too short or if the message is a
+ * broadcast message where a directed message was expected or
+ * vice versa. If so, then the message has to be ignored (according
+ * to section CEC 7.3 and CEC 12.2).
+ */
+ if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
+ u8 dir_fl = cec_msg_size[cmd] & BOTH;
+
+ min_len = cec_msg_size[cmd] & 0x1f;
+ if (msg->len < min_len)
+ valid_la = false;
+ else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ valid_la = false;
+ else if (cec_msg_is_broadcast(msg) &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST2_0))
+ valid_la = false;
+ }
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+ case CEC_MSG_TIMER_STATUS:
+ if (msg->msg[2] & 0x10) {
+ switch (msg->msg[2] & 0xf) {
+ case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+ case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+ if (msg->len < 5)
+ valid_la = false;
+ }
+ break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+ break;
+ case CEC_OP_RECORD_SRC_DIGITAL:
+ if (msg->len < 10)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_ANALOG:
+ if (msg->len < 7)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PLUG:
+ if (msg->len < 4)
+ valid_la = false;
+ break;
+ case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+ if (msg->len < 5)
+ valid_la = false;
+ break;
+ }
+ break;
+ }
+ }
+
/* It's a valid message and not a poll or CDC message */
- if (valid_la && msg->len > 1 && msg->msg[1] != CEC_MSG_CDC_MESSAGE) {
- u8 cmd = msg->msg[1];
+ if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
bool abort = cmd == CEC_MSG_FEATURE_ABORT;
/* The aborted command is in msg[2] */
@@ -806,6 +981,18 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
list_for_each_entry(data, &adap->wait_queue, list) {
struct cec_msg *dst = &data->msg;
+ /*
+ * The *only* CEC message that has two possible replies
+ * is CEC_MSG_INITIATE_ARC.
+ * In this case allow either of the two replies.
+ */
+ if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
+ (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
+ cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
+ (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
+ dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+ dst->reply = cmd;
+
/* Does the command match? */
if ((abort && cmd != dst->msg[1]) ||
(!abort && cmd != dst->reply))
@@ -823,6 +1010,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
dst->rx_status = msg->rx_status;
if (abort)
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+ msg->flags = dst->flags;
/* Remove it from the wait_queue */
list_del_init(&data->list);
@@ -1068,7 +1256,8 @@ configured:
mutex_unlock(&adap->lock);
for (i = 0; i < las->num_log_addrs; i++) {
- if (las->log_addr[i] == CEC_LOG_ADDR_INVALID)
+ if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
+ (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
/*
@@ -1190,6 +1379,29 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
return 0;
}
+ if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
+ /*
+ * Sanitize log_addrs fields if a CDC-Only device is
+ * requested.
+ */
+ log_addrs->num_log_addrs = 1;
+ log_addrs->osd_name[0] = '\0';
+ log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ /*
+ * This is just an internal convention since a CDC-Only device
+ * doesn't have to be a switch. But switches already use
+ * unregistered, so it makes some kind of sense to pick this
+ * as the primary device. Since a CDC-Only device never sends
+ * any 'normal' CEC messages this primary device type is never
+ * sent over the CEC bus.
+ */
+ log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
+ log_addrs->all_device_types[0] = 0;
+ log_addrs->features[0][0] = 0;
+ log_addrs->features[0][1] = 0;
+ }
+
/* Ensure the osd name is 0-terminated */
log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
@@ -1223,6 +1435,7 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
u8 *features = log_addrs->features[i];
bool op_is_dev_features = false;
+ unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
@@ -1249,19 +1462,19 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
- for (i = 0; i < feature_sz; i++) {
- if ((features[i] & 0x80) == 0) {
+ for (j = 0; j < feature_sz; j++) {
+ if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
- if (!op_is_dev_features || i == feature_sz) {
+ if (!op_is_dev_features || j == feature_sz) {
dprintk(1, "malformed features\n");
return -EINVAL;
}
/* Zero unused part of the feature array */
- memset(features + i + 1, 0, feature_sz - i - 1);
+ memset(features + j + 1, 0, feature_sz - j - 1);
}
if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
@@ -1410,6 +1623,11 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+ /* If this is a CDC-Only device, then ignore any non-CDC messages */
+ if (cec_is_cdc_only(&adap->log_addrs) &&
+ msg->msg[1] != CEC_MSG_CDC_MESSAGE)
+ return 0;
+
if (adap->ops->received) {
/* Allow drivers to process the message first */
if (adap->ops->received(adap, msg) != -ENOMSG)
@@ -1478,7 +1696,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
case CEC_MSG_USER_CONTROL_PRESSED:
- if (!(adap->capabilities & CEC_CAP_RC))
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#if IS_REACHABLE(CONFIG_RC_CORE)
@@ -1515,7 +1734,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
break;
case CEC_MSG_USER_CONTROL_RELEASED:
- if (!(adap->capabilities & CEC_CAP_RC))
+ if (!(adap->capabilities & CEC_CAP_RC) ||
+ !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#if IS_REACHABLE(CONFIG_RC_CORE)
rc_keyup(adap->rc);
@@ -1573,8 +1793,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
skip_processing:
- /* If this was a reply, then we're done */
- if (is_reply)
+ /* If this was a reply, then we're done, unless otherwise specified */
+ if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
return 0;
/*
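
The cec_msg_size[] table added above packs two things per opcode: the low five bits are the minimum message length, and the top bits say whether the opcode may be sent directed, broadcast, or both (BCAST2_0 marking the additional requirement checked on CEC 2.0 adapters). The user-space sketch below mirrors the validity checks from cec_received_msg() for a single table entry; the sample opcode and adapter version are only illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Same encoding as cec_msg_size[]: low 5 bits = minimum length. */
#define DIRECTED 0x80
#define BCAST1_4 0x40
#define BCAST2_0 0x20
#define BCAST    (BCAST1_4 | BCAST2_0)

static bool msg_valid(unsigned char entry, unsigned int len,
		      bool is_bcast, bool adap_is_2_0)
{
	if (!entry)
		return true;                        /* opcode not in the table          */
	if (len < (entry & 0x1f))
		return false;                       /* too short                        */
	if (!is_bcast && !(entry & DIRECTED))
		return false;                       /* directed not allowed             */
	if (is_bcast && !(entry & BCAST1_4))
		return false;                       /* broadcast not allowed            */
	if (is_bcast && adap_is_2_0 && !(entry & BCAST2_0))
		return false;                       /* 2.0 adapters also need BCAST2_0  */
	return true;
}

int main(void)
{
	unsigned char active_source = 4 | BCAST;    /* like CEC_MSG_ACTIVE_SOURCE */

	printf("%d\n", msg_valid(active_source, 4, true, true));   /* 1                   */
	printf("%d\n", msg_valid(active_source, 4, false, true));  /* 0: must be broadcast */
	printf("%d\n", msg_valid(active_source, 3, true, true));   /* 0: below min length  */
	return 0;
}
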
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index e274e2f22398..8950b6c9d6a9 100644
--- a/drivers/staging/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -88,7 +88,7 @@ static long cec_adap_g_caps(struct cec_adapter *adap,
{
struct cec_caps caps = {};
- strlcpy(caps.driver, adap->devnode.parent->driver->name,
+ strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
sizeof(caps.driver));
strlcpy(caps.name, adap->name, sizeof(caps.name));
caps.available_log_addrs = adap->available_log_addrs;
@@ -162,7 +162,9 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
- log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
+ CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ CEC_LOG_ADDRS_FL_CDC_ONLY;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -189,6 +191,12 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
+
+ /* A CDC-Only device can only send CDC messages */
+ if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
+ (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
+ return -EINVAL;
+
mutex_lock(&adap->lock);
if (!adap->is_configured)
err = -ENONET;
@@ -273,6 +281,7 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
err = cec_receive_msg(fh, &msg, block);
if (err)
return err;
+ msg.flags = 0;
if (copy_to_user(parg, &msg, sizeof(msg)))
return -EFAULT;
return 0;
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index b0137e247dc9..aca3ab83a8a1 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -132,7 +132,6 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
devnode->dev.bus = &cec_bus_type;
devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
devnode->dev.release = cec_devnode_release;
- devnode->dev.parent = devnode->parent;
dev_set_name(&devnode->dev, "cec%d", devnode->minor);
device_initialize(&devnode->dev);
@@ -198,13 +197,11 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps,
- u8 available_las, struct device *parent)
+ u8 available_las)
{
struct cec_adapter *adap;
int res;
- if (WARN_ON(!parent))
- return ERR_PTR(-EINVAL);
if (WARN_ON(!caps))
return ERR_PTR(-EINVAL);
if (WARN_ON(!ops))
@@ -214,8 +211,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (!adap)
return ERR_PTR(-ENOMEM);
- adap->owner = parent->driver->owner;
- adap->devnode.parent = parent;
strlcpy(adap->name, name, sizeof(adap->name));
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
@@ -264,7 +259,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.vendor = 0;
adap->rc->input_id.product = 0;
adap->rc->input_id.version = 1;
- adap->rc->dev.parent = parent;
adap->rc->driver_type = RC_DRIVER_SCANCODE;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_BIT_CEC;
@@ -278,14 +272,22 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
}
EXPORT_SYMBOL_GPL(cec_allocate_adapter);
-int cec_register_adapter(struct cec_adapter *adap)
+int cec_register_adapter(struct cec_adapter *adap,
+ struct device *parent)
{
int res;
if (IS_ERR_OR_NULL(adap))
return 0;
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ adap->owner = parent->driver->owner;
+ adap->devnode.dev.parent = parent;
+
#if IS_REACHABLE(CONFIG_RC_CORE)
+ adap->rc->dev.parent = parent;
if (adap->capabilities & CEC_CAP_RC) {
res = rc_register_device(adap->rc);
diff --git a/drivers/staging/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
index 70767a7900f2..70767a7900f2 100644
--- a/drivers/staging/media/cec/cec-priv.h
+++ b/drivers/media/cec/cec-priv.h
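
Taken together, the cec-core.c changes move the parent device (and the module owner and RC parent derived from it) from allocation time to registration time: cec_allocate_adapter() drops its struct device argument and cec_register_adapter() gains one. A hedged probe-time sketch of the resulting call order; the foo_* names, capability mask and error handling are placeholders, and only the two framework calls and their argument order come from this patch:

/* Inside a driver's probe routine (sketch, not a complete driver). */
adap = cec_allocate_adapter(&foo_cec_adap_ops, foo, "foo",
			    CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_RC,
			    1 /* available logical addresses */);
if (IS_ERR(adap))
	return PTR_ERR(adap);

err = cec_register_adapter(adap, &pdev->dev);   /* parent is supplied here now */
if (err) {
	cec_delete_adapter(adap);
	return err;
}
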
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index 2b2460e9e6b4..2533574c0cf4 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -14,7 +14,6 @@
#include "dmxdev.h"
#include "dvb_demux.h"
-#include "dvb_filter.h"
#include "dvb_net.h"
#include "dvb_frontend.h"
diff --git a/drivers/media/common/b2c2/flexcop-eeprom.c b/drivers/media/common/b2c2/flexcop-eeprom.c
index a25373a9bd84..844c7836c2a6 100644
--- a/drivers/media/common/b2c2/flexcop-eeprom.c
+++ b/drivers/media/common/b2c2/flexcop-eeprom.c
@@ -136,8 +136,7 @@ int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended)
if ((ret = flexcop_eeprom_lrc_read(fc,0x3f8,buf,8,4)) == 0) {
if (extended != 0) {
- err("TODO: extended (EUI64) MAC addresses aren't "
- "completely supported yet");
+ err("TODO: extended (EUI64) MAC addresses aren't completely supported yet");
ret = -EINVAL;
} else
memcpy(fc->dvb_adapter.proposed_mac,buf,6);
diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c
index 965d5eb33752..58d39a59fc09 100644
--- a/drivers/media/common/b2c2/flexcop-i2c.c
+++ b/drivers/media/common/b2c2/flexcop-i2c.c
@@ -33,8 +33,8 @@ static int flexcop_i2c_operation(struct flexcop_device *fc,
return -EREMOTEIO;
}
}
- deb_i2c("tried %d times i2c operation, "
- "never finished or too many ack errors.\n", i);
+ deb_i2c("tried %d times i2c operation, never finished or too many ack errors.\n",
+ i);
return -EREMOTEIO;
}
@@ -124,10 +124,10 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
#ifdef DUMP_I2C_MESSAGES
printk(KERN_DEBUG "%d ", i2c->port);
if (op == FC_READ)
- printk("rd(");
+ printk(KERN_CONT "rd(");
else
- printk("wr(");
- printk("%02x): %02x ", chipaddr, addr);
+ printk(KERN_CONT "wr(");
+ printk(KERN_CONT "%02x): %02x ", chipaddr, addr);
#endif
/* in that case addr is the only value ->
@@ -151,7 +151,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
#ifdef DUMP_I2C_MESSAGES
for (i = 0; i < bytes_to_transfer; i++)
- printk("%02x ", buf[i]);
+ printk(KERN_CONT "%02x ", buf[i]);
#endif
if (ret < 0)
@@ -163,7 +163,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
}
#ifdef DUMP_I2C_MESSAGES
- printk("\n");
+ printk(KERN_CONT "\n");
#endif
return 0;
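
The printk() changes in this file (and in cx2341x.c further down) are all of the same kind: a format string without a log level no longer reliably continues the previous line, so every continuation fragment is tagged KERN_CONT explicitly. A kernel-context sketch of the pattern, using variable names in the spirit of the hunk above:

/* One logical debug line built from several fragments; without KERN_CONT
 * each fragment would start a new log line. */
printk(KERN_DEBUG "%d ", port);              /* opens the line        */
printk(KERN_CONT "rd(");                     /* appended to that line */
printk(KERN_CONT "%02x): %02x ", chipaddr, addr);
printk(KERN_CONT "\n");                      /* closes the line       */
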
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index b8eff235367d..bb0d95fe64f9 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -23,18 +23,15 @@ void flexcop_determine_revision(struct flexcop_device *fc)
fc->rev = FLEXCOP_III;
break;
default:
- err("unknown FlexCop Revision: %x. Please report this to "
- "linux-dvb@linuxtv.org.",
+ err("unknown FlexCop Revision: %x. Please report this to linux-dvb@linuxtv.org.",
v.misc_204.Rev_N_sig_revision_hi);
break;
}
if ((fc->has_32_hw_pid_filter = v.misc_204.Rev_N_sig_caps))
- deb_info("this FlexCop has "
- "the additional 32 hardware pid filter.\n");
+ deb_info("this FlexCop has the additional 32 hardware pid filter.\n");
else
- deb_info("this FlexCop has "
- "the 6 basic main hardware pid filter.\n");
+ deb_info("this FlexCop has the 6 basic main hardware pid filter.\n");
/* bus parts have to decide if hw pid filtering is used or not. */
}
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index 0f5114d406f8..4338ab0043b4 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -46,8 +46,7 @@ int b2c2_flexcop_debug;
EXPORT_SYMBOL_GPL(b2c2_flexcop_debug);
module_param_named(debug, b2c2_flexcop_debug, int, 0644);
MODULE_PARM_DESC(debug,
- "set debug level (1=info,2=tuner,4=i2c,8=ts,"
- "16=sram,32=reg (|-able))."
+ "set debug level (1=info,2=tuner,4=i2c,8=ts,16=sram,32=reg (|-able))."
DEBSTATUS);
#undef DEBSTATUS
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 5e4afa0131e6..2725702eda7b 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -1190,8 +1190,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
if (p->stream_insert_nav_packets)
- printk(" (with navigation packets)");
- printk("\n");
+ printk(KERN_CONT " (with navigation packets)");
+ printk(KERN_CONT "\n");
printk(KERN_INFO "%s: VBI Format: %s\n",
prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));
@@ -1209,8 +1209,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
p->video_bitrate);
if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
- printk(", Peak %d", p->video_bitrate_peak);
- printk("\n");
+ printk(KERN_CONT ", Peak %d", p->video_bitrate_peak);
+ printk(KERN_CONT "\n");
printk(KERN_INFO
"%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n",
prefix,
@@ -1232,9 +1232,9 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
p->audio_mute ? " (muted)" : "");
if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
- printk(", %s", cx2341x_menu_item(p,
+ printk(KERN_CONT ", %s", cx2341x_menu_item(p,
V4L2_CID_MPEG_AUDIO_MODE_EXTENSION));
- printk(", %s, %s\n",
+ printk(KERN_CONT ", %s, %s\n",
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index ea2f3bf7368b..e034bcfcf757 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -390,6 +390,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
{
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
+ struct saa7146_dmaqueue *q = &vv->video_dmaq;
struct saa7146_format *fmt = NULL;
unsigned long flags;
unsigned int resource;
@@ -428,6 +429,9 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
/* shut down all used video dma transfers */
saa7146_write(dev, MC1, dmas);
+ if (q->curr)
+ saa7146_buffer_finish(dev, q, VIDEOBUF_DONE);
+
spin_unlock_irqrestore(&dev->slock, flags);
vv->video_fh = NULL;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 9148e14c9d07..affde1426b7a 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1044,7 +1044,7 @@ static void smsdvb_release(struct dvb_frontend *fe)
/* do nothing */
}
-static struct dvb_frontend_ops smsdvb_fe_ops = {
+static const struct dvb_frontend_ops smsdvb_fe_ops = {
.info = {
.name = "Siano Mobile Digital MDTV Receiver",
.frequency_min = 44250000,
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index 47da0378cad8..11976031aff8 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -28,6 +28,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
@@ -45,22 +46,9 @@ MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver");
MODULE_AUTHOR("John Klar");
MODULE_LICENSE("GPL");
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-1)");
-
#define STRM(array, i) \
(i < sizeof(array) / sizeof(char *) ? array[i] : "unknown")
-#define tveeprom_info(fmt, arg...) \
- v4l_printk(KERN_INFO, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_warn(fmt, arg...) \
- v4l_printk(KERN_WARNING, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_dbg(fmt, arg...) do { \
- if (debug) \
- v4l_printk(KERN_DEBUG, "tveeprom", \
- c->adapter, c->addr, fmt , ## arg); \
- } while (0)
/*
* The Hauppauge eeprom uses an 8bit field to determine which
@@ -510,19 +498,13 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
len = eeprom_data[i] & 0x07;
++i;
} else {
- tveeprom_warn("Encountered bad packet header [%02x]. "
- "Corrupt or not a Hauppauge eeprom.\n",
+ pr_warn("Encountered bad packet header [%02x]. Corrupt or not a Hauppauge eeprom.\n",
eeprom_data[i]);
return;
}
- if (debug) {
- tveeprom_info("Tag [%02x] + %d bytes:",
- eeprom_data[i], len - 1);
- for (j = 1; j < len; j++)
- printk(KERN_CONT " %02x", eeprom_data[i + j]);
- printk(KERN_CONT "\n");
- }
+ pr_debug("Tag [%02x] + %d bytes: %*ph\n",
+ eeprom_data[i], len - 1, len, &eeprom_data[i]);
/* process by tag */
tag = eeprom_data[i];
@@ -662,14 +644,14 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
/* case 0x12: tag 'InfoBits' */
default:
- tveeprom_dbg("Not sure what to do with tag [%02x]\n",
+ pr_debug("Not sure what to do with tag [%02x]\n",
tag);
/* dump the rest of the packet? */
}
}
if (!done) {
- tveeprom_warn("Ran out of data!\n");
+ pr_warn("Ran out of data!\n");
return;
}
@@ -682,8 +664,8 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
}
if (hasRadioTuner(tuner1) && !tvee->has_radio) {
- tveeprom_info("The eeprom says no radio is present, but the tuner type\n");
- tveeprom_info("indicates otherwise. I will assume that radio is present.\n");
+ pr_info("The eeprom says no radio is present, but the tuner type\n");
+ pr_info("indicates otherwise. I will assume that radio is present.\n");
tvee->has_radio = 1;
}
@@ -718,46 +700,46 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
}
}
- tveeprom_info("Hauppauge model %d, rev %s, serial# %u\n",
+ pr_info("Hauppauge model %d, rev %s, serial# %u\n",
tvee->model, tvee->rev_str, tvee->serial_number);
if (tvee->has_MAC_address == 1)
- tveeprom_info("MAC address is %pM\n", tvee->MAC_address);
- tveeprom_info("tuner model is %s (idx %d, type %d)\n",
+ pr_info("MAC address is %pM\n", tvee->MAC_address);
+ pr_info("tuner model is %s (idx %d, type %d)\n",
t_name1, tuner1, tvee->tuner_type);
- tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+ pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
t_fmt_name1[0], t_fmt_name1[1], t_fmt_name1[2],
t_fmt_name1[3], t_fmt_name1[4], t_fmt_name1[5],
t_fmt_name1[6], t_fmt_name1[7], t_format1);
if (tuner2)
- tveeprom_info("second tuner model is %s (idx %d, type %d)\n",
+ pr_info("second tuner model is %s (idx %d, type %d)\n",
t_name2, tuner2, tvee->tuner2_type);
if (t_format2)
- tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+ pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
t_fmt_name2[0], t_fmt_name2[1], t_fmt_name2[2],
t_fmt_name2[3], t_fmt_name2[4], t_fmt_name2[5],
t_fmt_name2[6], t_fmt_name2[7], t_format2);
if (audioic < 0) {
- tveeprom_info("audio processor is unknown (no idx)\n");
+ pr_info("audio processor is unknown (no idx)\n");
tvee->audio_processor = TVEEPROM_AUDPROC_OTHER;
} else {
if (audioic < ARRAY_SIZE(audio_ic))
- tveeprom_info("audio processor is %s (idx %d)\n",
+ pr_info("audio processor is %s (idx %d)\n",
audio_ic[audioic].name, audioic);
else
- tveeprom_info("audio processor is unknown (idx %d)\n",
+ pr_info("audio processor is unknown (idx %d)\n",
audioic);
}
if (tvee->decoder_processor)
- tveeprom_info("decoder processor is %s (idx %d)\n",
+ pr_info("decoder processor is %s (idx %d)\n",
STRM(decoderIC, tvee->decoder_processor),
tvee->decoder_processor);
if (tvee->has_ir)
- tveeprom_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
+ pr_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
tvee->has_radio ? "" : "no ",
(tvee->has_ir & 2) ? "" : "no ",
(tvee->has_ir & 4) ? "" : "no ");
else
- tveeprom_info("has %sradio\n",
+ pr_info("has %sradio\n",
tvee->has_radio ? "" : "no ");
}
EXPORT_SYMBOL(tveeprom_hauppauge_analog);
@@ -773,26 +755,17 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
buf = 0;
err = i2c_master_send(c, &buf, 1);
if (err != 1) {
- tveeprom_info("Huh, no eeprom present (err=%d)?\n", err);
+ pr_info("Huh, no eeprom present (err=%d)?\n", err);
return -1;
}
err = i2c_master_recv(c, eedata, len);
if (err != len) {
- tveeprom_warn("i2c eeprom read error (err=%d)\n", err);
+ pr_warn("i2c eeprom read error (err=%d)\n", err);
return -1;
}
- if (debug) {
- int i;
-
- tveeprom_info("full 256-byte eeprom dump:\n");
- for (i = 0; i < len; i++) {
- if (0 == (i % 16))
- tveeprom_info("%02x:", i);
- printk(KERN_CONT " %02x", eedata[i]);
- if (15 == (i % 16))
- printk(KERN_CONT "\n");
- }
- }
+
+ print_hex_dump_debug("full 256-byte eeprom dump:", DUMP_PREFIX_NONE,
+ 16, 1, eedata, len, true);
return 0;
}
EXPORT_SYMBOL(tveeprom_read);
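As a point of reference for the hunk above, a minimal module sketch of the print_hex_dump_debug() call that replaces the open-coded dump loop; the demo buffer and module names are hypothetical, only the <linux/printk.h> API is real. Like the old debug-parameter test, output only appears when debugging is enabled (via dynamic debug or DEBUG), but the row formatting and KERN_CONT handling no longer have to be done by hand.

/*
 * Hypothetical demo module; only print_hex_dump_debug() and its arguments
 * are the real <linux/printk.h> API used by the hunk above.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init hexdump_demo_init(void)
{
	u8 sample[32];
	int i;

	for (i = 0; i < ARRAY_SIZE(sample); i++)
		sample[i] = i;

	/* 16 bytes per row, 1-byte groups, no offset prefix, ASCII column on */
	print_hex_dump_debug("eeprom demo: ", DUMP_PREFIX_NONE, 16, 1,
			     sample, sizeof(sample), true);
	return 0;
}

static void __exit hexdump_demo_exit(void)
{
}

module_init(hexdump_demo_init);
module_exit(hexdump_demo_exit);
MODULE_LICENSE("GPL");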
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 1684810cab83..e47b46e2d26c 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -117,6 +117,7 @@ void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
tpg->colorspace = V4L2_COLORSPACE_SRGB;
tpg->perc_fill = 100;
+ tpg->hsv_enc = V4L2_HSV_ENC_180;
}
EXPORT_SYMBOL_GPL(tpg_init);
@@ -234,16 +235,18 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ tpg->color_enc = TGP_COLOR_ENC_RGB;
+ break;
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
- tpg->is_yuv = false;
+ tpg->color_enc = TGP_COLOR_ENC_LUMA;
break;
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_YUV32:
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
@@ -256,7 +259,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 2;
tpg->hdownsampling[2] = 2;
tpg->planes = 3;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YVU422M:
@@ -268,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 2;
tpg->hdownsampling[2] = 2;
tpg->planes = 3;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
@@ -280,7 +283,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 1;
tpg->hmask[1] = ~1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV21M:
@@ -292,7 +295,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->hdownsampling[1] = 1;
tpg->hmask[1] = ~1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUV444M:
case V4L2_PIX_FMT_YVU444M:
@@ -302,21 +305,25 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->vdownsampling[2] = 1;
tpg->hdownsampling[1] = 1;
tpg->hdownsampling[2] = 1;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_NV24:
case V4L2_PIX_FMT_NV42:
tpg->vdownsampling[1] = 1;
tpg->hdownsampling[1] = 1;
tpg->planes = 2;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
tpg->hmask[0] = ~1;
- tpg->is_yuv = true;
+ tpg->color_enc = TGP_COLOR_ENC_YCBCR;
+ break;
+ case V4L2_PIX_FMT_HSV24:
+ case V4L2_PIX_FMT_HSV32:
+ tpg->color_enc = TGP_COLOR_ENC_HSV;
break;
default:
return false;
@@ -351,6 +358,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
break;
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_HSV24:
tpg->twopixelsize[0] = 2 * 3;
break;
case V4L2_PIX_FMT_BGR666:
@@ -361,6 +369,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_HSV32:
tpg->twopixelsize[0] = 2 * 4;
break;
case V4L2_PIX_FMT_NV12:
@@ -490,6 +499,71 @@ static inline int linear_to_rec709(int v)
return tpg_linear_to_rec709[v];
}
+static void color_to_hsv(struct tpg_data *tpg, int r, int g, int b,
+ int *h, int *s, int *v)
+{
+ int max_rgb, min_rgb, diff_rgb;
+ int aux;
+ int third;
+ int third_size;
+
+ r >>= 4;
+ g >>= 4;
+ b >>= 4;
+
+ /* Value */
+ max_rgb = max3(r, g, b);
+ *v = max_rgb;
+ if (!max_rgb) {
+ *h = 0;
+ *s = 0;
+ return;
+ }
+
+ /* Saturation */
+ min_rgb = min3(r, g, b);
+ diff_rgb = max_rgb - min_rgb;
+ aux = 255 * diff_rgb;
+ aux += max_rgb / 2;
+ aux /= max_rgb;
+ *s = aux;
+ if (!aux) {
+ *h = 0;
+ return;
+ }
+
+ third_size = (tpg->real_hsv_enc == V4L2_HSV_ENC_180) ? 60 : 85;
+
+ /* Hue */
+ if (max_rgb == r) {
+ aux = g - b;
+ third = 0;
+ } else if (max_rgb == g) {
+ aux = b - r;
+ third = third_size;
+ } else {
+ aux = r - g;
+ third = third_size * 2;
+ }
+
+ aux *= third_size / 2;
+ aux += diff_rgb / 2;
+ aux /= diff_rgb;
+ aux += third;
+
+ /* Clamp Hue */
+ if (tpg->real_hsv_enc == V4L2_HSV_ENC_180) {
+ if (aux < 0)
+ aux += 180;
+ else if (aux > 180)
+ aux -= 180;
+ } else {
+ aux = aux & 0xff;
+ }
+
+ *h = aux;
+}
+
static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
int y_offset, int *y, int *cb, int *cr)
{
@@ -729,6 +803,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
int r = tpg_colors[col].r;
int g = tpg_colors[col].g;
int b = tpg_colors[col].b;
+ int y, cb, cr;
+ bool ycbcr_valid = false;
if (k == TPG_COLOR_TEXTBG) {
col = tpg_get_textbg_color(tpg);
@@ -759,9 +835,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g <<= 4;
b <<= 4;
}
- if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY ||
- tpg->fourcc == V4L2_PIX_FMT_Y16 ||
- tpg->fourcc == V4L2_PIX_FMT_Y16_BE) {
+
+ if (tpg->qual == TPG_QUAL_GRAY ||
+ tpg->color_enc == TGP_COLOR_ENC_LUMA) {
/* Rec. 709 Luma function */
/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
@@ -775,7 +851,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
* Remember that r, g and b are still in the 0 - 0xff0 range.
*/
if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL && !tpg->is_yuv) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL &&
+ tpg->color_enc == TGP_COLOR_ENC_RGB) {
/*
* Convert from full range (which is what r, g and b are)
* to limited range (which is the 'real' RGB range), which
@@ -785,7 +862,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g = (g * 219) / 255 + (16 << 4);
b = (b * 219) / 255 + (16 << 4);
} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED && !tpg->is_yuv) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
+ tpg->color_enc == TGP_COLOR_ENC_RGB) {
+
/*
* Clamp r, g and b to the limited range and convert to full
* range since that's what we deliver.
@@ -798,10 +877,10 @@ static void precalculate_color(struct tpg_data *tpg, int k)
b = (b - (16 << 4)) * 255 / 219;
}
- if (tpg->brightness != 128 || tpg->contrast != 128 ||
- tpg->saturation != 128 || tpg->hue) {
+ if ((tpg->brightness != 128 || tpg->contrast != 128 ||
+ tpg->saturation != 128 || tpg->hue) &&
+ tpg->color_enc != TGP_COLOR_ENC_LUMA) {
/* Implement these operations */
- int y, cb, cr;
int tmp_cb, tmp_cr;
/* First convert to YCbCr */
@@ -818,29 +897,45 @@ static void precalculate_color(struct tpg_data *tpg, int k)
cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
- if (tpg->is_yuv) {
- tpg->colors[k][0] = clamp(y >> 4, 1, 254);
- tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
- tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
- return;
- }
- ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+ if (tpg->color_enc == TGP_COLOR_ENC_YCBCR)
+ ycbcr_valid = true;
+ else
+ ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+ } else if ((tpg->brightness != 128 || tpg->contrast != 128) &&
+ tpg->color_enc == TGP_COLOR_ENC_LUMA) {
+ r = (16 << 4) + ((r - (16 << 4)) * tpg->contrast) / 128;
+ r += (tpg->brightness << 4) - (128 << 4);
}
- if (tpg->is_yuv) {
- /* Convert to YCbCr */
- int y, cb, cr;
+ switch (tpg->color_enc) {
+ case TGP_COLOR_ENC_HSV:
+ {
+ int h, s, v;
- color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+ color_to_hsv(tpg, r, g, b, &h, &s, &v);
+ tpg->colors[k][0] = h;
+ tpg->colors[k][1] = s;
+ tpg->colors[k][2] = v;
+ break;
+ }
+ case TGP_COLOR_ENC_YCBCR:
+ {
+ /* Convert to YCbCr */
+ if (!ycbcr_valid)
+ color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+ y >>= 4;
+ cb >>= 4;
+ cr >>= 4;
if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
- y = clamp(y, 16 << 4, 235 << 4);
- cb = clamp(cb, 16 << 4, 240 << 4);
- cr = clamp(cr, 16 << 4, 240 << 4);
+ y = clamp(y, 16, 235);
+ cb = clamp(cb, 16, 240);
+ cr = clamp(cr, 16, 240);
+ } else {
+ y = clamp(y, 1, 254);
+ cb = clamp(cb, 1, 254);
+ cr = clamp(cr, 1, 254);
}
- y = clamp(y >> 4, 1, 254);
- cb = clamp(cb >> 4, 1, 254);
- cr = clamp(cr >> 4, 1, 254);
switch (tpg->fourcc) {
case V4L2_PIX_FMT_YUV444:
y >>= 4;
@@ -861,7 +956,15 @@ static void precalculate_color(struct tpg_data *tpg, int k)
tpg->colors[k][0] = y;
tpg->colors[k][1] = cb;
tpg->colors[k][2] = cr;
- } else {
+ break;
+ }
+ case TGP_COLOR_ENC_LUMA:
+ {
+ tpg->colors[k][0] = r >> 4;
+ break;
+ }
+ case TGP_COLOR_ENC_RGB:
+ {
if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
r = (r * 219) / 255 + (16 << 4);
g = (g * 219) / 255 + (16 << 4);
@@ -911,6 +1014,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
tpg->colors[k][0] = r;
tpg->colors[k][1] = g;
tpg->colors[k][2] = b;
+ break;
+ }
}
}
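The net effect of the rework above is that the old two-way is_yuv boolean becomes a four-way dispatch on the colour encoding, giving HSV and luma-only formats their own output paths. A trivial standalone sketch of that dispatch shape; the enum values mirror the patch, everything else is illustrative.

#include <stdio.h>

enum tgp_color_enc {
	TGP_COLOR_ENC_RGB,
	TGP_COLOR_ENC_YCBCR,
	TGP_COLOR_ENC_HSV,
	TGP_COLOR_ENC_LUMA,
};

static const char *stored_components(enum tgp_color_enc enc)
{
	switch (enc) {
	case TGP_COLOR_ENC_HSV:
		return "H, S, V";
	case TGP_COLOR_ENC_YCBCR:
		return "Y', Cb, Cr";
	case TGP_COLOR_ENC_LUMA:
		return "luma only";
	case TGP_COLOR_ENC_RGB:
	default:
		return "R', G', B'";
	}
}

int main(void)
{
	int i;

	/* The old boolean could only distinguish "YCbCr or not". */
	for (i = TGP_COLOR_ENC_RGB; i <= TGP_COLOR_ENC_LUMA; i++)
		printf("encoding %d stores %s\n", i, stored_components(i));
	return 0;
}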
@@ -928,7 +1033,7 @@ static void gen_twopix(struct tpg_data *tpg,
{
unsigned offset = odd * tpg->twopixelsize[0] / 2;
u8 alpha = tpg->alpha_component;
- u8 r_y, g_u, b_v;
+ u8 r_y_h, g_u_s, b_v;
if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
color != TPG_COLOR_100_RED &&
@@ -936,161 +1041,161 @@ static void gen_twopix(struct tpg_data *tpg,
alpha = 0;
if (color == TPG_COLOR_RANDOM)
precalculate_color(tpg, color);
- r_y = tpg->colors[color][0]; /* R or precalculated Y */
- g_u = tpg->colors[color][1]; /* G or precalculated U */
+ r_y_h = tpg->colors[color][0]; /* R or precalculated Y, H */
+ g_u_s = tpg->colors[color][1]; /* G or precalculated U, S */
b_v = tpg->colors[color][2]; /* B or precalculated V */
switch (tpg->fourcc) {
case V4L2_PIX_FMT_GREY:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
break;
case V4L2_PIX_FMT_Y16:
/*
- * Ideally both bytes should be set to r_y, but then you won't
+ * Ideally both bytes should be set to r_y_h, but then you won't
* be able to detect endian problems. So keep it 0 except for
- * the corner case where r_y is 0xff so white really will be
+ * the corner case where r_y_h is 0xff so white really will be
* white (0xffff).
*/
- buf[0][offset] = r_y == 0xff ? r_y : 0;
- buf[0][offset+1] = r_y;
+ buf[0][offset] = r_y_h == 0xff ? r_y_h : 0;
+ buf[0][offset+1] = r_y_h;
break;
case V4L2_PIX_FMT_Y16_BE:
/* See comment for V4L2_PIX_FMT_Y16 above */
- buf[0][offset] = r_y;
- buf[0][offset+1] = r_y == 0xff ? r_y : 0;
+ buf[0][offset] = r_y_h;
+ buf[0][offset+1] = r_y_h == 0xff ? r_y_h : 0;
break;
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV420M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[1][0] = (buf[1][0] + g_u_s) / 2;
buf[2][0] = (buf[2][0] + b_v) / 2;
buf[1][1] = buf[1][0];
buf[2][1] = buf[2][0];
break;
}
- buf[1][0] = g_u;
+ buf[1][0] = g_u_s;
buf[2][0] = b_v;
break;
case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YVU420M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[1][0] = (buf[1][0] + b_v) / 2;
- buf[2][0] = (buf[2][0] + g_u) / 2;
+ buf[2][0] = (buf[2][0] + g_u_s) / 2;
buf[1][1] = buf[1][0];
buf[2][1] = buf[2][0];
break;
}
buf[1][0] = b_v;
- buf[2][0] = g_u;
+ buf[2][0] = g_u_s;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV16M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[1][0] = (buf[1][0] + g_u) / 2;
+ buf[1][0] = (buf[1][0] + g_u_s) / 2;
buf[1][1] = (buf[1][1] + b_v) / 2;
break;
}
- buf[1][0] = g_u;
+ buf[1][0] = g_u_s;
buf[1][1] = b_v;
break;
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV21M:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV61M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[1][0] = (buf[1][0] + b_v) / 2;
- buf[1][1] = (buf[1][1] + g_u) / 2;
+ buf[1][1] = (buf[1][1] + g_u_s) / 2;
break;
}
buf[1][0] = b_v;
- buf[1][1] = g_u;
+ buf[1][1] = g_u_s;
break;
case V4L2_PIX_FMT_YUV444M:
- buf[0][offset] = r_y;
- buf[1][offset] = g_u;
+ buf[0][offset] = r_y_h;
+ buf[1][offset] = g_u_s;
buf[2][offset] = b_v;
break;
case V4L2_PIX_FMT_YVU444M:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
buf[1][offset] = b_v;
- buf[2][offset] = g_u;
+ buf[2][offset] = g_u_s;
break;
case V4L2_PIX_FMT_NV24:
- buf[0][offset] = r_y;
- buf[1][2 * offset] = g_u;
+ buf[0][offset] = r_y_h;
+ buf[1][2 * offset] = g_u_s;
buf[1][2 * offset + 1] = b_v;
break;
case V4L2_PIX_FMT_NV42:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
buf[1][2 * offset] = b_v;
- buf[1][2 * offset + 1] = g_u;
+ buf[1][2 * offset + 1] = g_u_s;
break;
case V4L2_PIX_FMT_YUYV:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
- buf[0][1] = (buf[0][1] + g_u) / 2;
+ buf[0][1] = (buf[0][1] + g_u_s) / 2;
buf[0][3] = (buf[0][3] + b_v) / 2;
break;
}
- buf[0][1] = g_u;
+ buf[0][1] = g_u_s;
buf[0][3] = b_v;
break;
case V4L2_PIX_FMT_UYVY:
- buf[0][offset + 1] = r_y;
+ buf[0][offset + 1] = r_y_h;
if (odd) {
- buf[0][0] = (buf[0][0] + g_u) / 2;
+ buf[0][0] = (buf[0][0] + g_u_s) / 2;
buf[0][2] = (buf[0][2] + b_v) / 2;
break;
}
- buf[0][0] = g_u;
+ buf[0][0] = g_u_s;
buf[0][2] = b_v;
break;
case V4L2_PIX_FMT_YVYU:
- buf[0][offset] = r_y;
+ buf[0][offset] = r_y_h;
if (odd) {
buf[0][1] = (buf[0][1] + b_v) / 2;
- buf[0][3] = (buf[0][3] + g_u) / 2;
+ buf[0][3] = (buf[0][3] + g_u_s) / 2;
break;
}
buf[0][1] = b_v;
- buf[0][3] = g_u;
+ buf[0][3] = g_u_s;
break;
case V4L2_PIX_FMT_VYUY:
- buf[0][offset + 1] = r_y;
+ buf[0][offset + 1] = r_y_h;
if (odd) {
buf[0][0] = (buf[0][0] + b_v) / 2;
- buf[0][2] = (buf[0][2] + g_u) / 2;
+ buf[0][2] = (buf[0][2] + g_u_s) / 2;
break;
}
buf[0][0] = b_v;
- buf[0][2] = g_u;
+ buf[0][2] = g_u_s;
break;
case V4L2_PIX_FMT_RGB332:
- buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
+ buf[0][offset] = (r_y_h << 5) | (g_u_s << 2) | b_v;
break;
case V4L2_PIX_FMT_YUV565:
case V4L2_PIX_FMT_RGB565:
- buf[0][offset] = (g_u << 5) | b_v;
- buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
+ buf[0][offset] = (g_u_s << 5) | b_v;
+ buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 3);
break;
case V4L2_PIX_FMT_RGB565X:
- buf[0][offset] = (r_y << 3) | (g_u >> 3);
- buf[0][offset + 1] = (g_u << 5) | b_v;
+ buf[0][offset] = (r_y_h << 3) | (g_u_s >> 3);
+ buf[0][offset + 1] = (g_u_s << 5) | b_v;
break;
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
@@ -1098,8 +1203,8 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_ARGB444:
- buf[0][offset] = (g_u << 4) | b_v;
- buf[0][offset + 1] = (alpha & 0xf0) | r_y;
+ buf[0][offset] = (g_u_s << 4) | b_v;
+ buf[0][offset + 1] = (alpha & 0xf0) | r_y_h;
break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
@@ -1107,42 +1212,45 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_ARGB555:
- buf[0][offset] = (g_u << 5) | b_v;
- buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ buf[0][offset] = (g_u_s << 5) | b_v;
+ buf[0][offset + 1] = (alpha & 0x80) | (r_y_h << 2)
+ | (g_u_s >> 3);
break;
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_ARGB555X:
- buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
- buf[0][offset + 1] = (g_u << 5) | b_v;
+ buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3);
+ buf[0][offset + 1] = (g_u_s << 5) | b_v;
break;
case V4L2_PIX_FMT_RGB24:
- buf[0][offset] = r_y;
- buf[0][offset + 1] = g_u;
+ case V4L2_PIX_FMT_HSV24:
+ buf[0][offset] = r_y_h;
+ buf[0][offset + 1] = g_u_s;
buf[0][offset + 2] = b_v;
break;
case V4L2_PIX_FMT_BGR24:
buf[0][offset] = b_v;
- buf[0][offset + 1] = g_u;
- buf[0][offset + 2] = r_y;
+ buf[0][offset + 1] = g_u_s;
+ buf[0][offset + 2] = r_y_h;
break;
case V4L2_PIX_FMT_BGR666:
- buf[0][offset] = (b_v << 2) | (g_u >> 4);
- buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
- buf[0][offset + 2] = r_y << 6;
+ buf[0][offset] = (b_v << 2) | (g_u_s >> 4);
+ buf[0][offset + 1] = (g_u_s << 4) | (r_y_h >> 2);
+ buf[0][offset + 2] = r_y_h << 6;
buf[0][offset + 3] = 0;
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_HSV32:
alpha = 0;
/* fall through */
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
buf[0][offset] = alpha;
- buf[0][offset + 1] = r_y;
- buf[0][offset + 2] = g_u;
+ buf[0][offset + 1] = r_y_h;
+ buf[0][offset + 2] = g_u_s;
buf[0][offset + 3] = b_v;
break;
case V4L2_PIX_FMT_BGR32:
@@ -1151,87 +1259,87 @@ static void gen_twopix(struct tpg_data *tpg,
/* fall through */
case V4L2_PIX_FMT_ABGR32:
buf[0][offset] = b_v;
- buf[0][offset + 1] = g_u;
- buf[0][offset + 2] = r_y;
+ buf[0][offset + 1] = g_u_s;
+ buf[0][offset + 2] = r_y_h;
buf[0][offset + 3] = alpha;
break;
case V4L2_PIX_FMT_SBGGR8:
- buf[0][offset] = odd ? g_u : b_v;
- buf[1][offset] = odd ? r_y : g_u;
+ buf[0][offset] = odd ? g_u_s : b_v;
+ buf[1][offset] = odd ? r_y_h : g_u_s;
break;
case V4L2_PIX_FMT_SGBRG8:
- buf[0][offset] = odd ? b_v : g_u;
- buf[1][offset] = odd ? g_u : r_y;
+ buf[0][offset] = odd ? b_v : g_u_s;
+ buf[1][offset] = odd ? g_u_s : r_y_h;
break;
case V4L2_PIX_FMT_SGRBG8:
- buf[0][offset] = odd ? r_y : g_u;
- buf[1][offset] = odd ? g_u : b_v;
+ buf[0][offset] = odd ? r_y_h : g_u_s;
+ buf[1][offset] = odd ? g_u_s : b_v;
break;
case V4L2_PIX_FMT_SRGGB8:
- buf[0][offset] = odd ? g_u : r_y;
- buf[1][offset] = odd ? b_v : g_u;
+ buf[0][offset] = odd ? g_u_s : r_y_h;
+ buf[1][offset] = odd ? b_v : g_u_s;
break;
case V4L2_PIX_FMT_SBGGR10:
- buf[0][offset] = odd ? g_u << 2 : b_v << 2;
- buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
- buf[1][offset] = odd ? r_y << 2 : g_u << 2;
- buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+ buf[0][offset] = odd ? g_u_s << 2 : b_v << 2;
+ buf[0][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
+ buf[1][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+ buf[1][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SGBRG10:
- buf[0][offset] = odd ? b_v << 2 : g_u << 2;
- buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
- buf[1][offset] = odd ? g_u << 2 : r_y << 2;
- buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+ buf[0][offset] = odd ? b_v << 2 : g_u_s << 2;
+ buf[0][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
+ buf[1][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+ buf[1][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SGRBG10:
- buf[0][offset] = odd ? r_y << 2 : g_u << 2;
- buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
- buf[1][offset] = odd ? g_u << 2 : b_v << 2;
- buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+ buf[0][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+ buf[0][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
+ buf[1][offset] = odd ? g_u_s << 2 : b_v << 2;
+ buf[1][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SRGGB10:
- buf[0][offset] = odd ? g_u << 2 : r_y << 2;
- buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
- buf[1][offset] = odd ? b_v << 2 : g_u << 2;
- buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+ buf[0][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+ buf[0][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
+ buf[1][offset] = odd ? b_v << 2 : g_u_s << 2;
+ buf[1][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
buf[0][offset] |= (buf[0][offset] >> 2) & 3;
buf[1][offset] |= (buf[1][offset] >> 2) & 3;
break;
case V4L2_PIX_FMT_SBGGR12:
- buf[0][offset] = odd ? g_u << 4 : b_v << 4;
- buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
- buf[1][offset] = odd ? r_y << 4 : g_u << 4;
- buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+ buf[0][offset] = odd ? g_u_s << 4 : b_v << 4;
+ buf[0][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
+ buf[1][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+ buf[1][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SGBRG12:
- buf[0][offset] = odd ? b_v << 4 : g_u << 4;
- buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
- buf[1][offset] = odd ? g_u << 4 : r_y << 4;
- buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+ buf[0][offset] = odd ? b_v << 4 : g_u_s << 4;
+ buf[0][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
+ buf[1][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+ buf[1][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SGRBG12:
- buf[0][offset] = odd ? r_y << 4 : g_u << 4;
- buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
- buf[1][offset] = odd ? g_u << 4 : b_v << 4;
- buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+ buf[0][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+ buf[0][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
+ buf[1][offset] = odd ? g_u_s << 4 : b_v << 4;
+ buf[1][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
case V4L2_PIX_FMT_SRGGB12:
- buf[0][offset] = odd ? g_u << 4 : r_y << 4;
- buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
- buf[1][offset] = odd ? b_v << 4 : g_u << 4;
- buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+ buf[0][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+ buf[0][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
+ buf[1][offset] = odd ? b_v << 4 : g_u_s << 4;
+ buf[1][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
break;
@@ -1828,6 +1936,7 @@ static void tpg_recalc(struct tpg_data *tpg)
tpg->recalc_lines = true;
tpg->real_xfer_func = tpg->xfer_func;
tpg->real_ycbcr_enc = tpg->ycbcr_enc;
+ tpg->real_hsv_enc = tpg->hsv_enc;
tpg->real_quantization = tpg->quantization;
if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT)
@@ -1840,7 +1949,8 @@ static void tpg_recalc(struct tpg_data *tpg)
if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT)
tpg->real_quantization =
- V4L2_MAP_QUANTIZATION_DEFAULT(!tpg->is_yuv,
+ V4L2_MAP_QUANTIZATION_DEFAULT(
+ tpg->color_enc != TGP_COLOR_ENC_YCBCR,
tpg->colorspace, tpg->real_ycbcr_enc);
tpg_precalculate_colors(tpg);
@@ -1887,11 +1997,28 @@ static int tpg_pattern_avg(const struct tpg_data *tpg,
return -1;
}
+static const char *tpg_color_enc_str(enum tgp_color_enc
+ color_enc)
+{
+ switch (color_enc) {
+ case TGP_COLOR_ENC_HSV:
+ return "HSV";
+ case TGP_COLOR_ENC_YCBCR:
+ return "Y'CbCr";
+ case TGP_COLOR_ENC_LUMA:
+ return "Luma";
+ case TGP_COLOR_ENC_RGB:
+ default:
+ return "R'G'B";
+
+ }
+}
+
void tpg_log_status(struct tpg_data *tpg)
{
pr_info("tpg source WxH: %ux%u (%s)\n",
- tpg->src_width, tpg->src_height,
- tpg->is_yuv ? "YCbCr" : "RGB");
+ tpg->src_width, tpg->src_height,
+ tpg_color_enc_str(tpg->color_enc));
pr_info("tpg field: %u\n", tpg->field);
pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
tpg->crop.left, tpg->crop.top);
@@ -1900,6 +2027,7 @@ void tpg_log_status(struct tpg_data *tpg)
pr_info("tpg colorspace: %d\n", tpg->colorspace);
pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
pr_info("tpg Y'CbCr encoding: %d/%d\n", tpg->ycbcr_enc, tpg->real_ycbcr_enc);
+ pr_info("tpg HSV encoding: %d/%d\n", tpg->hsv_enc, tpg->real_hsv_enc);
pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
}
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index fa7a2490ed5f..eeef94a0c84e 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -5,7 +5,7 @@
config DVB_MAX_ADAPTERS
int "maximum number of DVB/ATSC adapters"
depends on DVB_CORE
- default 8
+ default 16
range 1 255
help
Maximum number of DVB/ATSC adapters. Increasing this number
@@ -13,7 +13,7 @@ config DVB_MAX_ADAPTERS
if a much lower number of DVB/ATSC adapters is present.
Only values in the range 4-32 are tested.
- If you are unsure about this, use the default value 8
+ If you are unsure about this, use the default value 16
config DVB_DYNAMIC_MINORS
bool "Dynamic DVB minor allocation"
@@ -27,3 +27,16 @@ config DVB_DYNAMIC_MINORS
will be required to manage the device nodes.
If you are unsure about this, say N here.
+
+config DVB_DEMUX_SECTION_LOSS_LOG
+ bool "Enable DVB demux section packet loss log"
+ depends on DVB_CORE
+ default n
+ help
+ Enable extra log messages meant to detect packet loss
+ inside the Kernel.
+
+ Should not be enabled in normal cases, as the logs can
+ be very verbose.
+
+ If you are unsure about this, say N here.
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile
index 8f22bcd7c1f9..281bc89576e6 100644
--- a/drivers/media/dvb-core/Makefile
+++ b/drivers/media/dvb-core/Makefile
@@ -4,7 +4,7 @@
dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
-dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
+dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \
dvb_ca_en50221.o dvb_frontend.o \
$(dvb-net-y) dvb_ringbuffer.o dvb_math.o
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index aeda2b64931c..f8adf4506a45 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -103,7 +103,6 @@ struct dmx_ts_feed {
u16 pid,
int type,
enum dmx_ts_pes pes_type,
- size_t circular_buffer_size,
ktime_t timeout);
int (*start_filtering)(struct dmx_ts_feed *feed);
int (*stop_filtering)(struct dmx_ts_feed *feed);
@@ -181,7 +180,6 @@ struct dmx_section_feed {
/* public: */
int (*set)(struct dmx_section_feed *feed,
u16 pid,
- size_t circular_buffer_size,
int check_crc);
int (*allocate_filter)(struct dmx_section_feed *feed,
struct dmx_section_filter **filter);
@@ -206,8 +204,7 @@ struct dmx_section_feed {
* the &dmx_demux.
* Any TS packets that match the filter settings are copied to a circular
* buffer. The filtered TS packets are delivered to the client using this
- * callback function. The size of the circular buffer is controlled by the
- * circular_buffer_size parameter of the &dmx_ts_feed.@set function.
+ * callback function.
* It is expected that the @buffer1 and @buffer2 callback parameters point to
* addresses within the circular buffer, but other implementations are also
* possible. Note that the called party should not try to free the memory
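The comment above still describes data arriving as two chunks because a payload that wraps around the end of the ring buffer is handed over as buffer1 (the tail of the ring) plus buffer2 (the part that wrapped to the start); only the per-feed buffer-size knob is gone. A user-space sketch of that reassembly, with hypothetical names rather than the real dmx callback signature:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void consume_filtered_data(const unsigned char *buffer1, size_t buffer1_len,
				  const unsigned char *buffer2, size_t buffer2_len)
{
	unsigned char section[32];
	size_t total = buffer1_len + buffer2_len;

	if (total > sizeof(section))
		return;
	/* Reassemble the wrapped payload into one contiguous section. */
	memcpy(section, buffer1, buffer1_len);
	memcpy(section + buffer1_len, buffer2, buffer2_len);
	printf("got %zu bytes (%zu + %zu): %.*s\n",
	       total, buffer1_len, buffer2_len, (int)total, section);
}

int main(void)
{
	unsigned char ring[17] = "ABCDEFGHIJKLMNOP";

	/* A 6-byte payload starting at offset 13 wraps after 3 bytes. */
	consume_filtered_data(&ring[13], 3, &ring[0], 3);
	return 0;
}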
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 7b67e1dd97fd..0c16bb213101 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) "dmxdev: " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -28,7 +30,7 @@
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "dmxdev.h"
static int debug;
@@ -36,7 +38,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
-#define dprintk if (debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
const u8 *src, size_t len)
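The do { ... } while (0) wrapper above matters because the old form, "#define dprintk if (debug) printk", expands to a bare if: an else following a dprintk() call then binds to the macro's hidden if (debug) rather than to the caller's own if. A user-space sketch of the hazard and of the safer form the patch adopts (the "dmxdev: " prefix stands in for pr_fmt; all names here are hypothetical):

#include <stdio.h>

static int debug = 1;

#define dprintk_old if (debug) printf	/* old, unsafe form */
#define dprintk_new(fmt, ...) do {				\
	if (debug)						\
		printf("dmxdev: %s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	int have_feed = 0;

	if (have_feed)
		dprintk_old("feed ready\n");
	else				/* binds to the macro's if (debug)... */
		printf("no feed\n");	/* ...so nothing prints even though the feed is absent */

	if (have_feed)
		dprintk_new("feed ready\n");
	else
		printf("no feed (safe form)\n");	/* runs as expected */
	return 0;
}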
@@ -50,7 +56,7 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
free = dvb_ringbuffer_free(buf);
if (len > free) {
- dprintk("dmxdev: buffer overflow\n");
+ dprintk("buffer overflow\n");
return -EOVERFLOW;
}
@@ -126,7 +132,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
struct dmxdev *dmxdev = dvbdev->priv;
struct dmx_frontend *front;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -258,7 +264,7 @@ static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
void *newmem;
void *oldmem;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (buf->size == size)
return 0;
@@ -367,7 +373,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
return 0;
}
del_timer(&dmxdevfilter->timer);
- dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
+ dprintk("section callback %*ph\n", 6, buffer1);
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
buffer1_len);
if (ret == buffer1_len) {
@@ -556,7 +562,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
struct dmxdev_filter *filter,
struct dmxdev_feed *feed)
{
- ktime_t timeout = ktime_set(0, 0);
+ ktime_t timeout = 0;
struct dmx_pes_filter_params *para = &filter->params.pes;
dmx_output_t otype;
int ret;
@@ -589,7 +595,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
tsfeed = feed->ts;
tsfeed->priv = filter;
- ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+ ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
return ret;
@@ -655,15 +661,15 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
secfeed,
dvb_dmxdev_section_callback);
if (ret < 0) {
- printk("DVB (%s): could not alloc feed\n",
+ pr_err("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
- ret = (*secfeed)->set(*secfeed, para->pid, 32768,
+ ret = (*secfeed)->set(*secfeed, para->pid,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
- printk("DVB (%s): could not set feed\n",
+ pr_err("DVB (%s): could not set feed\n",
__func__);
dvb_dmxdev_feed_restart(filter);
return ret;
@@ -844,7 +850,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter,
struct dmx_sct_filter_params *params)
{
- dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+ dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
__func__, params->pid, params->flags, params->timeout);
dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -1184,7 +1190,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
struct dmxdev *dmxdev = dvbdev->priv;
unsigned int mask = 0;
- dprintk("function : %s\n", __func__);
+ dprintk("%s\n", __func__);
if (dmxdev->exit)
return POLLERR;
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index a7a4674ccc40..779f4224b63e 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -262,6 +262,7 @@
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
@@ -411,4 +412,5 @@
#define USB_PID_SVEON_STV27 0xd3af
#define USB_PID_TURBOX_DTT_2000 0xd3a4
#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#endif
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index b5b5b195ea7f..fd893141211c 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -28,6 +28,8 @@
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
+#define pr_fmt(fmt) "dvb_ca_en50221: " fmt
+
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -46,7 +48,10 @@ static int dvb_ca_en50221_debug;
module_param_named(cam_debug, dvb_ca_en50221_debug, int, 0644);
MODULE_PARM_DESC(cam_debug, "enable verbose debug messages");
-#define dprintk if (dvb_ca_en50221_debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (dvb_ca_en50221_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg);\
+} while (0)
#define INIT_TIMEOUT_SECS 10
@@ -166,7 +171,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
{
unsigned int i;
- dvb_unregister_device(ca->dvbdev);
+ dvb_free_device(ca->dvbdev);
for (i = 0; i < ca->slot_count; i++)
vfree(ca->slot_info[i].rx_buffer.data);
@@ -298,7 +303,8 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
/* if we got the flags, it was successful! */
if (res & waitfor) {
- dprintk("%s succeeded timeout:%lu\n", __func__, jiffies - start);
+ dprintk("%s succeeded timeout:%lu\n",
+ __func__, jiffies - start);
return 0;
}
@@ -519,8 +525,9 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
/* is it a version we support? */
if (strncmp(dvb_str + 8, "1.00", 4)) {
- printk("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
- ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]);
+ pr_err("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
+ ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9],
+ dvb_str[10], dvb_str[11]);
return -EINVAL;
}
@@ -557,8 +564,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
break;
default: /* Unknown tuple type - just skip this tuple and move to the next one */
- dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n", tupleType,
- tupleLength);
+ dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n",
+ tupleType, tupleLength);
break;
}
}
@@ -567,7 +574,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
return -EINVAL;
dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
- manfid, devid, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option);
+ manfid, devid, ca->slot_info[slot].config_base,
+ ca->slot_info[slot].config_option);
// success!
return 0;
@@ -661,14 +669,15 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
/* check it will fit */
if (ebuf == NULL) {
if (bytes_read > ca->slot_info[slot].link_buf_size) {
- printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
- ca->dvbdev->adapter->num, bytes_read, ca->slot_info[slot].link_buf_size);
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
+ ca->dvbdev->adapter->num, bytes_read,
+ ca->slot_info[slot].link_buf_size);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
status = -EIO;
goto exit;
}
if (bytes_read < 2) {
- printk("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
+ pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
status = -EIO;
@@ -676,7 +685,7 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
}
} else {
if (bytes_read > ecount) {
- printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
ca->dvbdev->adapter->num);
status = -EIO;
goto exit;
@@ -1062,7 +1071,7 @@ static int dvb_ca_en50221_thread(void *data)
case DVB_CA_SLOTSTATE_WAITREADY:
if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- printk("dvb_ca adaptor %d: PC card did not respond :(\n",
+ pr_err("dvb_ca adaptor %d: PC card did not respond :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1084,14 +1093,14 @@ static int dvb_ca_en50221_thread(void *data)
}
}
- printk("dvb_ca adapter %d: Invalid PC card inserted :(\n",
+ pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
}
if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
- printk("dvb_ca adapter %d: Unable to initialise CAM :(\n",
+ pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1099,7 +1108,7 @@ static int dvb_ca_en50221_thread(void *data)
}
if (ca->pub->write_cam_control(ca->pub, slot,
CTRLIF_COMMAND, CMDREG_RS) != 0) {
- printk("dvb_ca adapter %d: Unable to reset CAM IF\n",
+ pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1114,7 +1123,7 @@ static int dvb_ca_en50221_thread(void *data)
case DVB_CA_SLOTSTATE_WAITFR:
if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- printk("dvb_ca adapter %d: DVB CAM did not respond :(\n",
+ pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n",
ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1141,7 +1150,8 @@ static int dvb_ca_en50221_thread(void *data)
}
}
- printk("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
+ ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
@@ -1150,7 +1160,8 @@ static int dvb_ca_en50221_thread(void *data)
if (ca->slot_info[slot].rx_buffer.data == NULL) {
rxbuf = vmalloc(RX_BUFFER_SIZE);
if (rxbuf == NULL) {
- printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n",
+ ca->dvbdev->adapter->num);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
dvb_ca_en50221_thread_update_delay(ca);
break;
@@ -1161,7 +1172,8 @@ static int dvb_ca_en50221_thread(void *data)
ca->pub->slot_ts_enable(ca->pub, slot);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
dvb_ca_en50221_thread_update_delay(ca);
- printk("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n",
+ ca->dvbdev->adapter->num);
break;
case DVB_CA_SLOTSTATE_RUNNING:
@@ -1497,7 +1509,8 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
pktlen = 2;
do {
if (idx == -1) {
- printk("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n", ca->dvbdev->adapter->num);
+ pr_err("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n",
+ ca->dvbdev->adapter->num);
status = -EIO;
goto exit;
}
@@ -1755,8 +1768,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
ca->dvbdev->adapter->num, ca->dvbdev->id);
if (IS_ERR(ca->thread)) {
ret = PTR_ERR(ca->thread);
- printk("dvb_ca_init: failed to start kernel_thread (%d)\n",
- ret);
+ pr_err("dvb_ca_init: failed to start kernel_thread (%d)\n",
+ ret);
goto unregister_device;
}
return 0;
@@ -1794,6 +1807,7 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
for (i = 0; i < ca->slot_count; i++) {
dvb_ca_en50221_slot_shutdown(ca, i);
}
+ dvb_remove_device(ca->dvbdev);
dvb_ca_private_put(ca);
pubca->private = NULL;
}
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index a0cf7b0d03e8..bbbff72bbb2a 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) "dvb_demux: " fmt
+
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -29,17 +31,11 @@
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/crc32.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/div64.h>
#include "dvb_demux.h"
-#define NOBUFS
-/*
-** #define DVB_DEMUX_SECTION_LOSS_LOG to monitor payload loss in the syslog
-*/
-// #define DVB_DEMUX_SECTION_LOSS_LOG
-
static int dvb_demux_tscheck;
module_param(dvb_demux_tscheck, int, 0644);
MODULE_PARM_DESC(dvb_demux_tscheck,
@@ -55,10 +51,13 @@ module_param(dvb_demux_feed_err_pkts, int, 0644);
MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
"when set to 0, drop packets with the TEI bit set (1 by default)");
-#define dprintk_tscheck(x...) do { \
- if (dvb_demux_tscheck && printk_ratelimit()) \
- printk(x); \
- } while (0)
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
+#define dprintk_tscheck(x...) do { \
+ if (dvb_demux_tscheck && printk_ratelimit()) \
+ dprintk(x); \
+} while (0)
/******************************************************************************
* static inlined helper functions
@@ -109,21 +108,23 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
{
int count = payload(buf);
int p;
- //int ccok;
- //u8 cc;
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ int ccok;
+ u8 cc;
+#endif
if (count == 0)
return -1;
p = 188 - count;
- /*
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
feed->cc = cc;
if (!ccok)
- printk("missed packet!\n");
- */
+ dprintk("missed packet!\n");
+#endif
if (buf[1] & 0x40) // PUSI ?
feed->peslen = 0xfffa;
@@ -189,7 +190,7 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
{
struct dmx_section_feed *sec = &feed->feed.sec;
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
if (sec->secbufp < sec->tsfeedp) {
int i, n = sec->tsfeedp - sec->secbufp;
@@ -199,12 +200,12 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
* but just first and last.
*/
if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
- printk("dvb_demux.c section ts padding loss: %d/%d\n",
+ dprintk("dvb_demux.c section ts padding loss: %d/%d\n",
n, sec->tsfeedp);
- printk("dvb_demux.c pad data:");
+ dprintk("dvb_demux.c pad data:");
for (i = 0; i < n; i++)
- printk(" %02x", sec->secbuf[i]);
- printk("\n");
+ pr_cont(" %02x", sec->secbuf[i]);
+ pr_cont("\n");
}
}
#endif
@@ -242,8 +243,8 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
return 0;
if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
- printk("dvb_demux.c section buffer full loss: %d/%d\n",
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ dprintk("dvb_demux.c section buffer full loss: %d/%d\n",
sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
DMX_MAX_SECFEED_SIZE);
#endif
@@ -276,9 +277,9 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
/* dump [secbuf .. secbuf+seclen) */
if (feed->pusi_seen)
dvb_dmx_swfilter_section_feed(feed);
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
else
- printk("dvb_demux.c pusi not seen, discarding section data\n");
+ dprintk("dvb_demux.c pusi not seen, discarding section data\n");
#endif
sec->secbufp += seclen; /* secbufp and secbuf moving together is */
sec->secbuf += seclen; /* redundant but saves pointer arithmetic */
@@ -312,9 +313,9 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
}
if (!ccok || dc_i) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
- printk("dvb_demux.c discontinuity detected %d bytes lost\n",
- count);
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+ dprintk("dvb_demux.c discontinuity detected %d bytes lost\n",
+ count);
/*
* those bytes under some circumstances will again be reported
* in the following dvb_dmx_swfilter_section_new
@@ -344,9 +345,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
dvb_dmx_swfilter_section_copy_dump(feed, after,
after_len);
}
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
else if (count > 0)
- printk("dvb_demux.c PUSI=1 but %d bytes lost\n", count);
+ dprintk("dvb_demux.c PUSI=1 but %d bytes lost\n",
+ count);
#endif
} else {
/* PUSI=0 (is not set), no section boundary */
@@ -415,9 +417,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
1024);
speed_timedelta = ktime_ms_delta(cur_time,
demux->speed_last_time);
- printk(KERN_INFO "TS speed %llu Kbits/sec \n",
- div64_u64(speed_bytes,
- speed_timedelta));
+ dprintk("TS speed %llu Kbits/sec \n",
+ div64_u64(speed_bytes,
+ speed_timedelta));
}
demux->speed_last_time = cur_time;
@@ -426,8 +428,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
}
if (buf[1] & 0x80) {
- dprintk_tscheck("TEI detected. "
- "PID=0x%x data1=0x%x\n",
+ dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
pid, buf[1]);
/* data in this packet can't be trusted - drop it unless
* module option dvb_demux_feed_err_pkts is set */
@@ -635,7 +636,7 @@ static void dvb_demux_feed_add(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (dvb_demux_feed_find(feed)) {
- printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -649,7 +650,7 @@ static void dvb_demux_feed_del(struct dvb_demux_feed *feed)
{
spin_lock_irq(&feed->demux->lock);
if (!(dvb_demux_feed_find(feed))) {
- printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -660,8 +661,7 @@ out:
}
static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
- enum dmx_ts_pes pes_type,
- size_t circular_buffer_size, ktime_t timeout)
+ enum dmx_ts_pes pes_type, ktime_t timeout)
{
struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
struct dvb_demux *demux = feed->demux;
@@ -691,23 +691,10 @@ static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
dvb_demux_feed_add(feed);
feed->pid = pid;
- feed->buffer_size = circular_buffer_size;
feed->timeout = timeout;
feed->ts_type = ts_type;
feed->pes_type = pes_type;
- if (feed->buffer_size) {
-#ifdef NOBUFS
- feed->buffer = NULL;
-#else
- feed->buffer = vmalloc(feed->buffer_size);
- if (!feed->buffer) {
- mutex_unlock(&demux->mutex);
- return -ENOMEM;
- }
-#endif
- }
-
feed->state = DMX_STATE_READY;
mutex_unlock(&demux->mutex);
@@ -796,7 +783,6 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
feed->demux = demux;
feed->pid = 0xffff;
feed->peslen = 0xfffa;
- feed->buffer = NULL;
(*ts_feed) = &feed->feed.ts;
(*ts_feed)->parent = dmx;
@@ -833,10 +819,6 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
mutex_unlock(&demux->mutex);
return -EINVAL;
}
-#ifndef NOBUFS
- vfree(feed->buffer);
- feed->buffer = NULL;
-#endif
feed->state = DMX_STATE_FREE;
feed->filter->state = DMX_STATE_FREE;
@@ -888,8 +870,7 @@ static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed,
}
static int dmx_section_feed_set(struct dmx_section_feed *feed,
- u16 pid, size_t circular_buffer_size,
- int check_crc)
+ u16 pid, int check_crc)
{
struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
@@ -903,19 +884,8 @@ static int dmx_section_feed_set(struct dmx_section_feed *feed,
dvb_demux_feed_add(dvbdmxfeed);
dvbdmxfeed->pid = pid;
- dvbdmxfeed->buffer_size = circular_buffer_size;
dvbdmxfeed->feed.sec.check_crc = check_crc;
-#ifdef NOBUFS
- dvbdmxfeed->buffer = NULL;
-#else
- dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size);
- if (!dvbdmxfeed->buffer) {
- mutex_unlock(&dvbdmx->mutex);
- return -ENOMEM;
- }
-#endif
-
dvbdmxfeed->state = DMX_STATE_READY;
mutex_unlock(&dvbdmx->mutex);
return 0;
@@ -1074,7 +1044,6 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
dvbdmxfeed->feed.sec.tsfeedp = 0;
dvbdmxfeed->filter = NULL;
- dvbdmxfeed->buffer = NULL;
(*feed) = &dvbdmxfeed->feed.sec;
(*feed)->is_filtering = 0;
@@ -1103,10 +1072,6 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
mutex_unlock(&dvbdmx->mutex);
return -EINVAL;
}
-#ifndef NOBUFS
- vfree(dvbdmxfeed->buffer);
- dvbdmxfeed->buffer = NULL;
-#endif
dvbdmxfeed->state = DMX_STATE_FREE;
dvb_demux_feed_del(dvbdmxfeed);
@@ -1268,7 +1233,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
if (!dvbdemux->cnt_storage)
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+ pr_warn("Couldn't allocate memory for TS/TEI check. Disabling it\n");
INIT_LIST_HEAD(&dvbdemux->frontend_list);
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 5ed3cab4ad28..9235b008ea0a 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -80,8 +80,6 @@ struct dvb_demux_feed {
int type;
int state;
u16 pid;
- u8 *buffer;
- int buffer_size;
ktime_t timeout;
struct dvb_demux_filter *filter;
diff --git a/drivers/media/dvb-core/dvb_filter.c b/drivers/media/dvb-core/dvb_filter.c
deleted file mode 100644
index 772003fb1821..000000000000
--- a/drivers/media/dvb-core/dvb_filter.c
+++ /dev/null
@@ -1,603 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include "dvb_filter.h"
-
-#if 0
-static unsigned int bitrates[3][16] =
-{{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,0},
- {0,32,48,56,64,80,96,112,128,160,192,224,256,320,384,0},
- {0,32,40,48,56,64,80,96,112,128,160,192,224,256,320,0}};
-#endif
-
-static u32 freq[4] = {480, 441, 320, 0};
-
-static unsigned int ac3_bitrates[32] =
- {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
- 0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static u32 ac3_frames[3][32] =
- {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
- 1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
- {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
- 1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
- {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
- 1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
-
-
-
-#if 0
-static void setup_ts2pes(ipack *pa, ipack *pv, u16 *pida, u16 *pidv,
- void (*pes_write)(u8 *buf, int count, void *data),
- void *priv)
-{
- dvb_filter_ipack_init(pa, IPACKS, pes_write);
- dvb_filter_ipack_init(pv, IPACKS, pes_write);
- pa->pid = pida;
- pv->pid = pidv;
- pa->data = priv;
- pv->data = priv;
-}
-#endif
-
-#if 0
-static void ts_to_pes(ipack *p, u8 *buf) // don't need count (=188)
-{
- u8 off = 0;
-
- if (!buf || !p ){
- printk("NULL POINTER IDIOT\n");
- return;
- }
- if (buf[1]&PAY_START) {
- if (p->plength == MMAX_PLENGTH-6 && p->found>6){
- p->plength = p->found-6;
- p->found = 0;
- send_ipack(p);
- dvb_filter_ipack_reset(p);
- }
- }
- if (buf[3] & ADAPT_FIELD) { // adaptation field?
- off = buf[4] + 1;
- if (off+4 > 187) return;
- }
- dvb_filter_instant_repack(buf+4+off, TS_SIZE-4-off, p);
-}
-#endif
-
-#if 0
-/* needs 5 byte input, returns picture coding type*/
-static int read_picture_header(u8 *headr, struct mpg_picture *pic, int field, int pr)
-{
- u8 pct;
-
- if (pr) printk( "Pic header: ");
- pic->temporal_reference[field] = (( headr[0] << 2 ) |
- (headr[1] & 0x03) )& 0x03ff;
- if (pr) printk( " temp ref: 0x%04x", pic->temporal_reference[field]);
-
- pct = ( headr[1] >> 2 ) & 0x07;
- pic->picture_coding_type[field] = pct;
- if (pr) {
- switch(pct){
- case I_FRAME:
- printk( " I-FRAME");
- break;
- case B_FRAME:
- printk( " B-FRAME");
- break;
- case P_FRAME:
- printk( " P-FRAME");
- break;
- }
- }
-
-
- pic->vinfo.vbv_delay = (( headr[1] >> 5 ) | ( headr[2] << 3) |
- ( (headr[3] & 0x1F) << 11) ) & 0xffff;
-
- if (pr) printk( " vbv delay: 0x%04x", pic->vinfo.vbv_delay);
-
- pic->picture_header_parameter = ( headr[3] & 0xe0 ) |
- ((headr[4] & 0x80) >> 3);
-
- if ( pct == B_FRAME ){
- pic->picture_header_parameter |= ( headr[4] >> 3 ) & 0x0f;
- }
- if (pr) printk( " pic head param: 0x%x",
- pic->picture_header_parameter);
-
- return pct;
-}
-#endif
-
-#if 0
-/* needs 4 byte input */
-static int read_gop_header(u8 *headr, struct mpg_picture *pic, int pr)
-{
- if (pr) printk("GOP header: ");
-
- pic->time_code = (( headr[0] << 17 ) | ( headr[1] << 9) |
- ( headr[2] << 1 ) | (headr[3] &0x01)) & 0x1ffffff;
-
- if (pr) printk(" time: %d:%d.%d ", (headr[0]>>2)& 0x1F,
- ((headr[0]<<4)& 0x30)| ((headr[1]>>4)& 0x0F),
- ((headr[1]<<3)& 0x38)| ((headr[2]>>5)& 0x0F));
-
- if ( ( headr[3] & 0x40 ) != 0 ){
- pic->closed_gop = 1;
- } else {
- pic->closed_gop = 0;
- }
- if (pr) printk("closed: %d", pic->closed_gop);
-
- if ( ( headr[3] & 0x20 ) != 0 ){
- pic->broken_link = 1;
- } else {
- pic->broken_link = 0;
- }
- if (pr) printk(" broken: %d\n", pic->broken_link);
-
- return 0;
-}
-#endif
-
-#if 0
-/* needs 8 byte input */
-static int read_sequence_header(u8 *headr, struct dvb_video_info *vi, int pr)
-{
- int sw;
- int form = -1;
-
- if (pr) printk("Reading sequence header\n");
-
- vi->horizontal_size = ((headr[1] &0xF0) >> 4) | (headr[0] << 4);
- vi->vertical_size = ((headr[1] &0x0F) << 8) | (headr[2]);
-
- sw = (int)((headr[3]&0xF0) >> 4) ;
-
- switch( sw ){
- case 1:
- if (pr)
- printk("Videostream: ASPECT: 1:1");
- vi->aspect_ratio = 100;
- break;
- case 2:
- if (pr)
- printk("Videostream: ASPECT: 4:3");
- vi->aspect_ratio = 133;
- break;
- case 3:
- if (pr)
- printk("Videostream: ASPECT: 16:9");
- vi->aspect_ratio = 177;
- break;
- case 4:
- if (pr)
- printk("Videostream: ASPECT: 2.21:1");
- vi->aspect_ratio = 221;
- break;
-
- case 5 ... 15:
- if (pr)
- printk("Videostream: ASPECT: reserved");
- vi->aspect_ratio = 0;
- break;
-
- default:
- vi->aspect_ratio = 0;
- return -1;
- }
-
- if (pr)
- printk(" Size = %dx%d",vi->horizontal_size,vi->vertical_size);
-
- sw = (int)(headr[3]&0x0F);
-
- switch ( sw ) {
- case 1:
- if (pr)
- printk(" FRate: 23.976 fps");
- vi->framerate = 23976;
- form = -1;
- break;
- case 2:
- if (pr)
- printk(" FRate: 24 fps");
- vi->framerate = 24000;
- form = -1;
- break;
- case 3:
- if (pr)
- printk(" FRate: 25 fps");
- vi->framerate = 25000;
- form = VIDEO_MODE_PAL;
- break;
- case 4:
- if (pr)
- printk(" FRate: 29.97 fps");
- vi->framerate = 29970;
- form = VIDEO_MODE_NTSC;
- break;
- case 5:
- if (pr)
- printk(" FRate: 30 fps");
- vi->framerate = 30000;
- form = VIDEO_MODE_NTSC;
- break;
- case 6:
- if (pr)
- printk(" FRate: 50 fps");
- vi->framerate = 50000;
- form = VIDEO_MODE_PAL;
- break;
- case 7:
- if (pr)
- printk(" FRate: 60 fps");
- vi->framerate = 60000;
- form = VIDEO_MODE_NTSC;
- break;
- }
-
- vi->bit_rate = (headr[4] << 10) | (headr[5] << 2) | (headr[6] & 0x03);
-
- vi->vbv_buffer_size
- = (( headr[6] & 0xF8) >> 3 ) | (( headr[7] & 0x1F )<< 5);
-
- if (pr){
- printk(" BRate: %d Mbit/s",4*(vi->bit_rate)/10000);
- printk(" vbvbuffer %d",16*1024*(vi->vbv_buffer_size));
- printk("\n");
- }
-
- vi->video_format = form;
-
- return 0;
-}
-#endif
-
-
-#if 0
-static int get_vinfo(u8 *mbuf, int count, struct dvb_video_info *vi, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
-
- while (found < 4 && c+4 < count){
- u8 *b;
-
- b = mbuf+c;
- if ( b[0] == 0x00 && b[1] == 0x00 && b[2] == 0x01
- && b[3] == 0xb3) found = 4;
- else {
- c++;
- }
- }
-
- if (! found) return -1;
- c += 4;
- if (c+12 >= count) return -1;
- headr = mbuf+c;
- if (read_sequence_header(headr, vi, pr) < 0) return -1;
- vi->off = c-4;
- return 0;
-}
-#endif
-
-
-#if 0
-static int get_ainfo(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
- int fr = 0;
-
- while (found < 2 && c < count){
- u8 b[2];
- memcpy( b, mbuf+c, 2);
-
- if ( b[0] == 0xff && (b[1] & 0xf8) == 0xf8)
- found = 2;
- else {
- c++;
- }
- }
-
- if (!found) return -1;
-
- if (c+3 >= count) return -1;
- headr = mbuf+c;
-
- ai->layer = (headr[1] & 0x06) >> 1;
-
- if (pr)
- printk("Audiostream: Layer: %d", 4-ai->layer);
-
-
- ai->bit_rate = bitrates[(3-ai->layer)][(headr[2] >> 4 )]*1000;
-
- if (pr){
- if (ai->bit_rate == 0)
- printk(" Bit rate: free");
- else if (ai->bit_rate == 0xf)
- printk(" BRate: reserved");
- else
- printk(" BRate: %d kb/s", ai->bit_rate/1000);
- }
-
- fr = (headr[2] & 0x0c ) >> 2;
- ai->frequency = freq[fr]*100;
- if (pr){
- if (ai->frequency == 3)
- printk(" Freq: reserved\n");
- else
- printk(" Freq: %d kHz\n",ai->frequency);
-
- }
- ai->off = c;
- return 0;
-}
-#endif
-
-
-int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
- u8 *headr;
- int found = 0;
- int c = 0;
- u8 frame = 0;
- int fr = 0;
-
- while ( !found && c < count){
- u8 *b = mbuf+c;
-
- if ( b[0] == 0x0b && b[1] == 0x77 )
- found = 1;
- else {
- c++;
- }
- }
-
- if (!found) return -1;
- if (pr)
- printk("Audiostream: AC3");
-
- ai->off = c;
- if (c+5 >= count) return -1;
-
- ai->layer = 0; // 0 for AC3
- headr = mbuf+c+2;
-
- frame = (headr[2]&0x3f);
- ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
-
- if (pr)
- printk(" BRate: %d kb/s", (int) ai->bit_rate/1000);
-
- ai->frequency = (headr[2] & 0xc0 ) >> 6;
- fr = (headr[2] & 0xc0 ) >> 6;
- ai->frequency = freq[fr]*100;
- if (pr) printk (" Freq: %d Hz\n", (int) ai->frequency);
-
-
- ai->framesize = ac3_frames[fr][frame >> 1];
- if ((frame & 1) && (fr == 1)) ai->framesize++;
- ai->framesize = ai->framesize << 1;
- if (pr) printk (" Framesize %d\n",(int) ai->framesize);
-
-
- return 0;
-}
-EXPORT_SYMBOL(dvb_filter_get_ac3info);
-
-
-#if 0
-static u8 *skip_pes_header(u8 **bufp)
-{
- u8 *inbuf = *bufp;
- u8 *buf = inbuf;
- u8 *pts = NULL;
- int skip = 0;
-
- static const int mpeg1_skip_table[16] = {
- 1, 0xffff, 5, 10, 0xffff, 0xffff, 0xffff, 0xffff,
- 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff
- };
-
-
- if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */
- if (buf[7] & PTS_ONLY)
- pts = buf+9;
- else pts = NULL;
- buf = inbuf + 9 + inbuf[8];
- } else { /* mpeg1 */
- for (buf = inbuf + 6; *buf == 0xff; buf++)
- if (buf == inbuf + 6 + 16) {
- break;
- }
- if ((*buf & 0xc0) == 0x40)
- buf += 2;
- skip = mpeg1_skip_table [*buf >> 4];
- if (skip == 5 || skip == 10) pts = buf;
- else pts = NULL;
-
- buf += mpeg1_skip_table [*buf >> 4];
- }
-
- *bufp = buf;
- return pts;
-}
-#endif
-
-#if 0
-static void initialize_quant_matrix( u32 *matrix )
-{
- int i;
-
- matrix[0] = 0x08101013;
- matrix[1] = 0x10131616;
- matrix[2] = 0x16161616;
- matrix[3] = 0x1a181a1b;
- matrix[4] = 0x1b1b1a1a;
- matrix[5] = 0x1a1a1b1b;
- matrix[6] = 0x1b1d1d1d;
- matrix[7] = 0x2222221d;
- matrix[8] = 0x1d1d1b1b;
- matrix[9] = 0x1d1d2020;
- matrix[10] = 0x22222526;
- matrix[11] = 0x25232322;
- matrix[12] = 0x23262628;
- matrix[13] = 0x28283030;
- matrix[14] = 0x2e2e3838;
- matrix[15] = 0x3a454553;
-
- for ( i = 16 ; i < 32 ; i++ )
- matrix[i] = 0x10101010;
-}
-#endif
-
-#if 0
-static void initialize_mpg_picture(struct mpg_picture *pic)
-{
- int i;
-
- /* set MPEG1 */
- pic->mpeg1_flag = 1;
- pic->profile_and_level = 0x4A ; /* MP@LL */
- pic->progressive_sequence = 1;
- pic->low_delay = 0;
-
- pic->sequence_display_extension_flag = 0;
- for ( i = 0 ; i < 4 ; i++ ){
- pic->frame_centre_horizontal_offset[i] = 0;
- pic->frame_centre_vertical_offset[i] = 0;
- }
- pic->last_frame_centre_horizontal_offset = 0;
- pic->last_frame_centre_vertical_offset = 0;
-
- pic->picture_display_extension_flag[0] = 0;
- pic->picture_display_extension_flag[1] = 0;
- pic->sequence_header_flag = 0;
- pic->gop_flag = 0;
- pic->sequence_end_flag = 0;
-}
-#endif
-
-#if 0
-static void mpg_set_picture_parameter( int32_t field_type, struct mpg_picture *pic )
-{
- int16_t last_h_offset;
- int16_t last_v_offset;
-
- int16_t *p_h_offset;
- int16_t *p_v_offset;
-
- if ( pic->mpeg1_flag ){
- pic->picture_structure[field_type] = VIDEO_FRAME_PICTURE;
- pic->top_field_first = 0;
- pic->repeat_first_field = 0;
- pic->progressive_frame = 1;
- pic->picture_coding_parameter = 0x000010;
- }
-
- /* Reset flag */
- pic->picture_display_extension_flag[field_type] = 0;
-
- last_h_offset = pic->last_frame_centre_horizontal_offset;
- last_v_offset = pic->last_frame_centre_vertical_offset;
- if ( field_type == FIRST_FIELD ){
- p_h_offset = pic->frame_centre_horizontal_offset;
- p_v_offset = pic->frame_centre_vertical_offset;
- *p_h_offset = last_h_offset;
- *(p_h_offset + 1) = last_h_offset;
- *(p_h_offset + 2) = last_h_offset;
- *p_v_offset = last_v_offset;
- *(p_v_offset + 1) = last_v_offset;
- *(p_v_offset + 2) = last_v_offset;
- } else {
- pic->frame_centre_horizontal_offset[3] = last_h_offset;
- pic->frame_centre_vertical_offset[3] = last_v_offset;
- }
-}
-#endif
-
-#if 0
-static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_type)
-{
- pic->picture_header = 0;
- pic->sequence_header_data
- = ( INIT_HORIZONTAL_SIZE << 20 )
- | ( INIT_VERTICAL_SIZE << 8 )
- | ( INIT_ASPECT_RATIO << 4 )
- | ( INIT_FRAME_RATE );
- pic->mpeg1_flag = 0;
- pic->vinfo.horizontal_size
- = INIT_DISP_HORIZONTAL_SIZE;
- pic->vinfo.vertical_size
- = INIT_DISP_VERTICAL_SIZE;
- pic->picture_display_extension_flag[field_type]
- = 0;
- pic->pts_flag[field_type] = 0;
-
- pic->sequence_gop_header = 0;
- pic->picture_header = 0;
- pic->sequence_header_flag = 0;
- pic->gop_flag = 0;
- pic->sequence_end_flag = 0;
- pic->sequence_display_extension_flag = 0;
- pic->last_frame_centre_horizontal_offset = 0;
- pic->last_frame_centre_vertical_offset = 0;
- pic->channel = chan;
-}
-#endif
-
-void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
- dvb_filter_pes2ts_cb_t *cb, void *priv)
-{
- unsigned char *buf=p2ts->buf;
-
- buf[0]=0x47;
- buf[1]=(pid>>8);
- buf[2]=pid&0xff;
- p2ts->cc=0;
- p2ts->cb=cb;
- p2ts->priv=priv;
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts_init);
-
-int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
- int len, int payload_start)
-{
- unsigned char *buf=p2ts->buf;
- int ret=0, rest;
-
- //len=6+((pes[4]<<8)|pes[5]);
-
- if (payload_start)
- buf[1]|=0x40;
- else
- buf[1]&=~0x40;
- while (len>=184) {
- buf[3]=0x10|((p2ts->cc++)&0x0f);
- memcpy(buf+4, pes, 184);
- if ((ret=p2ts->cb(p2ts->priv, buf)))
- return ret;
- len-=184; pes+=184;
- buf[1]&=~0x40;
- }
- if (!len)
- return 0;
- buf[3]=0x30|((p2ts->cc++)&0x0f);
- rest=183-len;
- if (rest) {
- buf[5]=0x00;
- if (rest-1)
- memset(buf+6, 0xff, rest-1);
- }
- buf[4]=rest;
- memcpy(buf+5+rest, pes, len);
- return p2ts->cb(p2ts->priv, buf);
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 01511e5a5566..db74cb74d271 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -28,6 +28,8 @@
/* Enables DVBv3 compatibility bits at the headers */
#define __DVB_CORE__
+#define pr_fmt(fmt) "dvb_frontend: " fmt
+
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -67,6 +69,9 @@ MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB volt
module_param(dvb_mfe_wait_time, int, 0644);
MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)");
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
#define FESTATE_IDLE 1
#define FESTATE_RETUNE 2
#define FESTATE_TUNING_FAST 4
@@ -99,8 +104,6 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
static DEFINE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
- struct kref refcount;
-
/* thread/frontend values */
struct dvb_device *dvbdev;
struct dvb_frontend_parameters parameters_out;
@@ -138,21 +141,30 @@ struct dvb_frontend_private {
#endif
};
-static void dvb_frontend_private_free(struct kref *ref)
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+ void (*release)(struct dvb_frontend *fe));
+
+static void dvb_frontend_free(struct kref *ref)
{
- struct dvb_frontend_private *fepriv =
- container_of(ref, struct dvb_frontend_private, refcount);
+ struct dvb_frontend *fe =
+ container_of(ref, struct dvb_frontend, refcount);
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ dvb_free_device(fepriv->dvbdev);
+
+ dvb_frontend_invoke_release(fe, fe->ops.release);
+
kfree(fepriv);
}
-static void dvb_frontend_private_put(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_put(struct dvb_frontend *fe)
{
- kref_put(&fepriv->refcount, dvb_frontend_private_free);
+ kref_put(&fe->refcount, dvb_frontend_free);
}
-static void dvb_frontend_private_get(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_get(struct dvb_frontend *fe)
{
- kref_get(&fepriv->refcount);
+ kref_get(&fe->refcount);
}
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
@@ -1515,12 +1527,8 @@ static int dtv_set_frontend(struct dvb_frontend *fe);
static bool is_dvbv3_delsys(u32 delsys)
{
- bool status;
-
- status = (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
- (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
-
- return status;
+ return (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
+ (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
}
/**
@@ -2356,7 +2364,8 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
int i;
u8 last = 1;
if (dvb_frontend_debug)
- printk("%s switch command: 0x%04lx\n", __func__, swcmd);
+ dprintk("%s switch command: 0x%04lx\n",
+ __func__, swcmd);
nexttime = ktime_get_boottime();
if (dvb_frontend_debug)
tv[0] = nexttime;
@@ -2379,10 +2388,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
dvb_frontend_sleep_until(&nexttime, 8000);
}
if (dvb_frontend_debug) {
- printk("%s(%d): switch delay (should be 32k followed by all 8k\n",
+ dprintk("%s(%d): switch delay (should be 32k followed by all 8k)\n",
__func__, fe->dvb->num);
for (i = 1; i < 10; i++)
- printk("%d: %d\n", i,
+ pr_info("%d: %d\n", i,
(int) ktime_us_delta(tv[i], tv[i-1]));
}
err = 0;
@@ -2545,7 +2554,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->events.eventr = fepriv->events.eventw = 0;
}
- dvb_frontend_private_get(fepriv);
+ dvb_frontend_get(fe);
if (adapter->mfe_shared)
mutex_unlock (&adapter->mfe_lock);
@@ -2595,7 +2604,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
fe->ops.ts_bus_ctrl(fe, 0);
}
- dvb_frontend_private_put(fepriv);
+ dvb_frontend_put(fe);
return ret;
}
@@ -2685,7 +2694,14 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
}
fepriv = fe->frontend_priv;
- kref_init(&fepriv->refcount);
+ kref_init(&fe->refcount);
+
+ /*
+ * After initialization, there need to be two references: one
+ * for dvb_unregister_frontend(), and another one for
+ * dvb_frontend_detach().
+ */
+ dvb_frontend_get(fe);
sema_init(&fepriv->sem, 1);
init_waitqueue_head (&fepriv->wait_queue);
@@ -2720,50 +2736,33 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
dev_dbg(fe->dvb->device, "%s:\n", __func__);
mutex_lock(&frontend_mutex);
- dvb_frontend_stop (fe);
- dvb_unregister_device (fepriv->dvbdev);
+ dvb_frontend_stop(fe);
+ dvb_remove_device(fepriv->dvbdev);
/* fe is invalid now */
mutex_unlock(&frontend_mutex);
- dvb_frontend_private_put(fepriv);
+ dvb_frontend_put(fe);
return 0;
}
EXPORT_SYMBOL(dvb_unregister_frontend);
-#ifdef CONFIG_MEDIA_ATTACH
-void dvb_frontend_detach(struct dvb_frontend* fe)
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+ void (*release)(struct dvb_frontend *fe))
{
- void *ptr;
-
- if (fe->ops.release_sec) {
- fe->ops.release_sec(fe);
- dvb_detach(fe->ops.release_sec);
- }
- if (fe->ops.tuner_ops.release) {
- fe->ops.tuner_ops.release(fe);
- dvb_detach(fe->ops.tuner_ops.release);
- }
- if (fe->ops.analog_ops.release) {
- fe->ops.analog_ops.release(fe);
- dvb_detach(fe->ops.analog_ops.release);
- }
- ptr = (void*)fe->ops.release;
- if (ptr) {
- fe->ops.release(fe);
- dvb_detach(ptr);
+ if (release) {
+ release(fe);
+#ifdef CONFIG_MEDIA_ATTACH
+ dvb_detach(release);
+#endif
}
}
-#else
+
void dvb_frontend_detach(struct dvb_frontend* fe)
{
- if (fe->ops.release_sec)
- fe->ops.release_sec(fe);
- if (fe->ops.tuner_ops.release)
- fe->ops.tuner_ops.release(fe);
- if (fe->ops.analog_ops.release)
- fe->ops.analog_ops.release(fe);
- if (fe->ops.release)
- fe->ops.release(fe);
+ dvb_frontend_invoke_release(fe, fe->ops.release_sec);
+ dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.detach);
+ dvb_frontend_put(fe);
}
-#endif
EXPORT_SYMBOL(dvb_frontend_detach);
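
With the kref moved into struct dvb_frontend, a bridge driver's teardown is expected to drop the two references taken at registration time, one via dvb_unregister_frontend() and one via dvb_frontend_detach(); the final free happens in dvb_frontend_free() once any open file handles are gone. A minimal, hypothetical sketch (struct example_card and its fe member are illustrative, not part of this patch):

	struct example_card {
		struct dvb_frontend *fe;
	};

	static void example_card_remove(struct example_card *card)
	{
		/* Drops the reference held for dvb_unregister_frontend(). */
		dvb_unregister_frontend(card->fe);

		/*
		 * Drops the reference held for dvb_frontend_detach(); the
		 * frontend is finally freed by dvb_frontend_free() once the
		 * last open file handle is released.
		 */
		dvb_frontend_detach(card->fe);
	}
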
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index fb6e84811504..482912d3b77a 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -225,7 +225,7 @@ struct dvb_tuner_ops {
struct dvb_tuner_info info;
- int (*release)(struct dvb_frontend *fe);
+ void (*release)(struct dvb_frontend *fe);
int (*init)(struct dvb_frontend *fe);
int (*sleep)(struct dvb_frontend *fe);
int (*suspend)(struct dvb_frontend *fe);
@@ -323,7 +323,11 @@ struct dtv_frontend_properties;
*
* @info: embedded struct dvb_tuner_info with tuner properties
* @delsys: Delivery systems supported by the frontend
- * @release: callback function called when frontend is dettached.
+ * @detach: callback function called when frontend is detached.
+ * drivers should clean up, but not yet free the struct
+ * dvb_frontend allocation.
+ * @release: callback function called when frontend is ready to be
+ * freed.
* drivers should free any allocated memory.
* @release_sec: callback function requesting that the Satellite Equipment
* Control (SEC) driver release and free any memory
@@ -408,6 +412,7 @@ struct dvb_frontend_ops {
u8 delsys[MAX_DELSYS];
+ void (*detach)(struct dvb_frontend *fe);
void (*release)(struct dvb_frontend* fe);
void (*release_sec)(struct dvb_frontend* fe);
@@ -655,6 +660,7 @@ struct dtv_frontend_properties {
*/
struct dvb_frontend {
+ struct kref refcount;
struct dvb_frontend_ops ops;
struct dvb_adapter *dvb;
void *demodulator_priv;
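
As a sketch of how the split documented above is meant to be used by a demod driver (all names here are illustrative, not from this patch): .detach undoes attach-time side effects while keeping the dvb_frontend allocation alive, and .release performs the final free once dvb_frontend_free() drops the last reference.

	static void example_demod_detach(struct dvb_frontend *fe)
	{
		/* Undo attach-time side effects; keep fe and its state allocated. */
	}

	static void example_demod_release(struct dvb_frontend *fe)
	{
		/* Invoked from dvb_frontend_free() when the last reference is dropped. */
		kfree(fe->demodulator_priv);
	}

	static const struct dvb_frontend_ops example_demod_ops = {
		.detach  = example_demod_detach,
		.release = example_demod_release,
	};
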
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 0da622f5fe69..bc5e8cfe7ca2 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -54,13 +54,15 @@
*
*/
+#define pr_fmt(fmt) "dvb_net: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
#include <linux/uio.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/mutex.h>
#include <linux/sched.h>
@@ -309,451 +311,589 @@ static inline void reset_ule( struct dvb_net_priv *p )
* Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
* TS cells of a single PID.
*/
-static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
-{
- struct dvb_net_priv *priv = netdev_priv(dev);
- unsigned long skipped = 0L;
- const u8 *ts, *ts_end, *from_where = NULL;
- u8 ts_remain = 0, how_much = 0, new_ts = 1;
- struct ethhdr *ethh = NULL;
- bool error = false;
+struct dvb_net_ule_handle {
+ struct net_device *dev;
+ struct dvb_net_priv *priv;
+ struct ethhdr *ethh;
+ const u8 *buf;
+ size_t buf_len;
+ unsigned long skipped;
+ const u8 *ts, *ts_end, *from_where;
+ u8 ts_remain, how_much, new_ts;
+ bool error;
#ifdef ULE_DEBUG
- /* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
+ /*
+ * The code inside ULE_DEBUG keeps a history of the
+ * last 100 TS cells processed.
+ */
static unsigned char ule_hist[100*TS_SZ];
static unsigned char *ule_where = ule_hist, ule_dump;
#endif
+};
- /* For all TS cells in current buffer.
- * Appearently, we are called for every single TS cell.
- */
- for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {
-
- if (new_ts) {
- /* We are about to process a new TS cell. */
+static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
+{
+ /* We are about to process a new TS cell. */
#ifdef ULE_DEBUG
- if (ule_where >= &ule_hist[100*TS_SZ]) ule_where = ule_hist;
- memcpy( ule_where, ts, TS_SZ );
- if (ule_dump) {
- hexdump( ule_where, TS_SZ );
- ule_dump = 0;
- }
- ule_where += TS_SZ;
+ if (h->ule_where >= &h->ule_hist[100*TS_SZ])
+ h->ule_where = h->ule_hist;
+ memcpy(h->ule_where, h->ts, TS_SZ);
+ if (h->ule_dump) {
+ hexdump(h->ule_where, TS_SZ);
+ h->ule_dump = 0;
+ }
+ h->ule_where += TS_SZ;
#endif
- /* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */
- if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
- printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
- priv->ts_count, ts[0],
- (ts[1] & TS_TEI) >> 7,
- (ts[3] & TS_SC) >> 6);
-
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
- /* Prepare for next SNDU. */
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
- }
- reset_ule(priv);
- priv->need_pusi = 1;
+ /*
+ * Check TS error conditions: sync_byte, transport_error_indicator,
+ * scrambling_control.
+ */
+ if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
+ ((h->ts[3] & TS_SC) != 0)) {
+ pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
+ h->priv->ts_count, h->ts[0],
+ (h->ts[1] & TS_TEI) >> 7,
+ (h->ts[3] & TS_SC) >> 6);
+
+ /* Drop partly decoded SNDU, reset state, resync on PUSI. */
+ if (h->priv->ule_skb) {
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
+ }
+ reset_ule(h->priv);
+ h->priv->need_pusi = 1;
- /* Continue with next TS cell. */
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
+ /* Continue with next TS cell. */
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
- ts_remain = 184;
- from_where = ts + 4;
+ h->ts_remain = 184;
+ h->from_where = h->ts + 4;
+
+ return 0;
+}
+
+static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
+{
+ if (h->ts[1] & TS_PUSI) {
+ /* Find beginning of first ULE SNDU in current TS cell. */
+ /* Synchronize continuity counter. */
+ h->priv->tscc = h->ts[3] & 0x0F;
+ /* There is a pointer field here. */
+ if (h->ts[4] > h->ts_remain) {
+ pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
+ h->priv->ts_count, h->ts[4]);
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
}
- /* Synchronize on PUSI, if required. */
- if (priv->need_pusi) {
- if (ts[1] & TS_PUSI) {
- /* Find beginning of first ULE SNDU in current TS cell. */
- /* Synchronize continuity counter. */
- priv->tscc = ts[3] & 0x0F;
- /* There is a pointer field here. */
- if (ts[4] > ts_remain) {
- printk(KERN_ERR "%lu: Invalid ULE packet "
- "(pointer field %d)\n", priv->ts_count, ts[4]);
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
- /* Skip to destination of pointer field. */
- from_where = &ts[5] + ts[4];
- ts_remain -= 1 + ts[4];
- skipped = 0;
- } else {
- skipped++;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
+ /* Skip to destination of pointer field. */
+ h->from_where = &h->ts[5] + h->ts[4];
+ h->ts_remain -= 1 + h->ts[4];
+ h->skipped = 0;
+ } else {
+ h->skipped++;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
+{
+ /* Check continuity counter. */
+ if ((h->ts[3] & 0x0F) == h->priv->tscc)
+ h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
+ else {
+ /* TS discontinuity handling: */
+ pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
+ h->priv->ts_count, h->ts[3] & 0x0F,
+ h->priv->tscc);
+ /* Drop partly decoded SNDU, reset state, resync on PUSI. */
+ if (h->priv->ule_skb) {
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ // reset_ule(h->priv); moved to below.
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
}
+ reset_ule(h->priv);
+ /* skip to next PUSI. */
+ h->priv->need_pusi = 1;
+ return 1;
+ }
+ /*
+ * If we still have an incomplete payload, but PUSI is
+ * set; some TS cells are missing.
+ * This is only possible here, if we missed exactly 16 TS
+ * cells (continuity counter wrap).
+ */
+ if (h->ts[1] & TS_PUSI) {
+ if (!h->priv->need_pusi) {
+ if (!(*h->from_where < (h->ts_remain-1)) ||
+ *h->from_where != h->priv->ule_sndu_remain) {
+ /*
+ * Pointer field is invalid.
+ * Drop this TS cell and any started ULE SNDU.
+ */
+ pr_warn("%lu: Invalid pointer field: %u.\n",
+ h->priv->ts_count,
+ *h->from_where);
- if (new_ts) {
- /* Check continuity counter. */
- if ((ts[3] & 0x0F) == priv->tscc)
- priv->tscc = (priv->tscc + 1) & 0x0F;
- else {
- /* TS discontinuity handling: */
- printk(KERN_WARNING "%lu: TS discontinuity: got %#x, "
- "expected %#x.\n", priv->ts_count, ts[3] & 0x0F, priv->tscc);
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- dev_kfree_skb( priv->ule_skb );
- /* Prepare for next SNDU. */
- // reset_ule(priv); moved to below.
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
+ /*
+ * Drop partly decoded SNDU, reset state,
+ * resync on PUSI.
+ */
+ if (h->priv->ule_skb) {
+ h->error = true;
+ dev_kfree_skb(h->priv->ule_skb);
}
- reset_ule(priv);
- /* skip to next PUSI. */
- priv->need_pusi = 1;
- continue;
- }
- /* If we still have an incomplete payload, but PUSI is
- * set; some TS cells are missing.
- * This is only possible here, if we missed exactly 16 TS
- * cells (continuity counter wrap). */
- if (ts[1] & TS_PUSI) {
- if (! priv->need_pusi) {
- if (!(*from_where < (ts_remain-1)) || *from_where != priv->ule_sndu_remain) {
- /* Pointer field is invalid. Drop this TS cell and any started ULE SNDU. */
- printk(KERN_WARNING "%lu: Invalid pointer "
- "field: %u.\n", priv->ts_count, *from_where);
-
- /* Drop partly decoded SNDU, reset state, resync on PUSI. */
- if (priv->ule_skb) {
- error = true;
- dev_kfree_skb(priv->ule_skb);
- }
-
- if (error || priv->ule_sndu_remain) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
- error = false;
- }
-
- reset_ule(priv);
- priv->need_pusi = 1;
- continue;
- }
- /* Skip pointer field (we're processing a
- * packed payload). */
- from_where += 1;
- ts_remain -= 1;
- } else
- priv->need_pusi = 0;
-
- if (priv->ule_sndu_remain > 183) {
- /* Current SNDU lacks more data than there could be available in the
- * current TS cell. */
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
- "got PUSI (pf %d, ts_remain %d). Flushing incomplete payload.\n",
- priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
- dev_kfree_skb(priv->ule_skb);
- /* Prepare for next SNDU. */
- reset_ule(priv);
- /* Resync: go to where pointer field points to: start of next ULE SNDU. */
- from_where += ts[4];
- ts_remain -= ts[4];
+
+ if (h->error || h->priv->ule_sndu_remain) {
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_frame_errors++;
+ h->error = false;
}
+
+ reset_ule(h->priv);
+ h->priv->need_pusi = 1;
+ return 1;
}
+ /*
+ * Skip pointer field (we're processing a
+ * packed payload).
+ */
+ h->from_where += 1;
+ h->ts_remain -= 1;
+ } else
+ h->priv->need_pusi = 0;
+
+ if (h->priv->ule_sndu_remain > 183) {
+ /*
+ * Current SNDU lacks more data than there
+ * could be available in the current TS cell.
+ */
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_length_errors++;
+ pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d). Flushing incomplete payload.\n",
+ h->priv->ts_count,
+ h->priv->ule_sndu_remain,
+ h->ts[4], h->ts_remain);
+ dev_kfree_skb(h->priv->ule_skb);
+ /* Prepare for next SNDU. */
+ reset_ule(h->priv);
+ /*
+ * Resync: go to where pointer field points to:
+ * start of next ULE SNDU.
+ */
+ h->from_where += h->ts[4];
+ h->ts_remain -= h->ts[4];
}
+ }
+ return 0;
+}
- /* Check if new payload needs to be started. */
- if (priv->ule_skb == NULL) {
- /* Start a new payload with skb.
- * Find ULE header. It is only guaranteed that the
- * length field (2 bytes) is contained in the current
- * TS.
- * Check ts_remain has to be >= 2 here. */
- if (ts_remain < 2) {
- printk(KERN_WARNING "Invalid payload packing: only %d "
- "bytes left in TS. Resyncing.\n", ts_remain);
- priv->ule_sndu_len = 0;
- priv->need_pusi = 1;
- ts += TS_SZ;
- continue;
- }
- if (! priv->ule_sndu_len) {
- /* Got at least two bytes, thus extrace the SNDU length. */
- priv->ule_sndu_len = from_where[0] << 8 | from_where[1];
- if (priv->ule_sndu_len & 0x8000) {
- /* D-Bit is set: no dest mac present. */
- priv->ule_sndu_len &= 0x7FFF;
- priv->ule_dbit = 1;
- } else
- priv->ule_dbit = 0;
-
- if (priv->ule_sndu_len < 5) {
- printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
- "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- priv->ule_sndu_len = 0;
- priv->need_pusi = 1;
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
- }
- ts_remain -= 2; /* consume the 2 bytes SNDU length. */
- from_where += 2;
- }
+/*
+ * Start a new payload with skb.
+ * Find ULE header. It is only guaranteed that the
+ * length field (2 bytes) is contained in the current
+ * TS.
+ * Note that h->ts_remain has to be >= 2 here.
+ */
+static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
+{
+ if (h->ts_remain < 2) {
+ pr_warn("Invalid payload packing: only %d bytes left in TS. Resyncing.\n",
+ h->ts_remain);
+ h->priv->ule_sndu_len = 0;
+ h->priv->need_pusi = 1;
+ h->ts += TS_SZ;
+ return 1;
+ }
- priv->ule_sndu_remain = priv->ule_sndu_len + 2;
+ if (!h->priv->ule_sndu_len) {
+ /* Got at least two bytes, thus extract the SNDU length. */
+ h->priv->ule_sndu_len = h->from_where[0] << 8 |
+ h->from_where[1];
+ if (h->priv->ule_sndu_len & 0x8000) {
+ /* D-Bit is set: no dest mac present. */
+ h->priv->ule_sndu_len &= 0x7FFF;
+ h->priv->ule_dbit = 1;
+ } else
+ h->priv->ule_dbit = 0;
+
+ if (h->priv->ule_sndu_len < 5) {
+ pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
+ h->priv->ts_count,
+ h->priv->ule_sndu_len);
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_length_errors++;
+ h->priv->ule_sndu_len = 0;
+ h->priv->need_pusi = 1;
+ h->new_ts = 1;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+ }
+ h->ts_remain -= 2; /* consume the 2 bytes SNDU length. */
+ h->from_where += 2;
+ }
+
+ h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
+ /*
+ * State of current TS:
+ * h->ts_remain (remaining bytes in the current TS cell)
+ * 0 ule_type is not available now, we need the next TS cell
+ * 1 the first byte of the ule_type is present
+ * >=2 full ULE header present, maybe some payload data as well.
+ */
+ switch (h->ts_remain) {
+ case 1:
+ h->priv->ule_sndu_remain--;
+ h->priv->ule_sndu_type = h->from_where[0] << 8;
+
+ /* first byte of ule_type is set. */
+ h->priv->ule_sndu_type_1 = 1;
+ h->ts_remain -= 1;
+ h->from_where += 1;
+ /* fallthrough */
+ case 0:
+ h->new_ts = 1;
+ h->ts += TS_SZ;
+ h->priv->ts_count++;
+ return 1;
+
+ default: /* complete ULE header is present in current TS. */
+ /* Extract ULE type field. */
+ if (h->priv->ule_sndu_type_1) {
+ h->priv->ule_sndu_type_1 = 0;
+ h->priv->ule_sndu_type |= h->from_where[0];
+ h->from_where += 1; /* points to payload start. */
+ h->ts_remain -= 1;
+ } else {
+ /* Complete type is present in new TS. */
+ h->priv->ule_sndu_type = h->from_where[0] << 8 |
+ h->from_where[1];
+ h->from_where += 2; /* points to payload start. */
+ h->ts_remain -= 2;
+ }
+ break;
+ }
+
+ /*
+ * Allocate the skb (decoder target buffer) with the correct size,
+ * as follows:
+ *
+ * prepare for the largest case: bridged SNDU with MAC address
+ * (dbit = 0).
+ */
+ h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
+ ETH_HLEN + ETH_ALEN);
+ if (!h->priv->ule_skb) {
+ pr_notice("%s: Memory squeeze, dropping packet.\n",
+ h->dev->name);
+ h->dev->stats.rx_dropped++;
+ return -1;
+ }
+
+ /* This includes the CRC32 _and_ dest mac, if !dbit. */
+ h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
+ h->priv->ule_skb->dev = h->dev;
+ /*
+ * Leave space for Ethernet or bridged SNDU header
+ * (eth hdr plus one MAC addr).
+ */
+ skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);
+
+ return 0;
+}
+
+
+static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
+{
+ static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
+
+ /*
+ * The destination MAC address is the next data in the skb. It comes
+ * before any extension headers.
+ *
+ * Check if the payload of this SNDU should be passed up the stack.
+ */
+ if (h->priv->rx_mode == RX_MODE_PROMISC)
+ return 0;
+
+ if (h->priv->ule_skb->data[0] & 0x01) {
+ /* multicast or broadcast */
+ if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
+ /* multicast */
+ if (h->priv->rx_mode == RX_MODE_MULTI) {
+ int i;
+
+ for (i = 0; i < h->priv->multi_num &&
+ !ether_addr_equal(h->priv->ule_skb->data,
+ h->priv->multi_macs[i]);
+ i++)
+ ;
+ if (i == h->priv->multi_num)
+ return 1;
+ } else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
+ return 1; /* no broadcast; */
/*
- * State of current TS:
- * ts_remain (remaining bytes in the current TS cell)
- * 0 ule_type is not available now, we need the next TS cell
- * 1 the first byte of the ule_type is present
- * >=2 full ULE header present, maybe some payload data as well.
+ * else:
+ * all multicast mode: accept all multicast packets
*/
- switch (ts_remain) {
- case 1:
- priv->ule_sndu_remain--;
- priv->ule_sndu_type = from_where[0] << 8;
- priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
- ts_remain -= 1; from_where += 1;
- /* Continue w/ next TS. */
- case 0:
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- continue;
-
- default: /* complete ULE header is present in current TS. */
- /* Extract ULE type field. */
- if (priv->ule_sndu_type_1) {
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_type |= from_where[0];
- from_where += 1; /* points to payload start. */
- ts_remain -= 1;
- } else {
- /* Complete type is present in new TS. */
- priv->ule_sndu_type = from_where[0] << 8 | from_where[1];
- from_where += 2; /* points to payload start. */
- ts_remain -= 2;
- }
- break;
- }
+ }
+ /* else: broadcast */
+ } else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
+ return 1;
- /* Allocate the skb (decoder target buffer) with the correct size, as follows:
- * prepare for the largest case: bridged SNDU with MAC address (dbit = 0). */
- priv->ule_skb = dev_alloc_skb( priv->ule_sndu_len + ETH_HLEN + ETH_ALEN );
- if (priv->ule_skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- return;
- }
+ return 0;
+}
+
+
+static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
+ u32 ule_crc, u32 expected_crc)
+{
+ u8 dest_addr[ETH_ALEN];
+
+ if (ule_crc != expected_crc) {
+ pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
+ h->priv->ts_count, ule_crc, expected_crc,
+ h->priv->ule_sndu_len, h->priv->ule_sndu_type,
+ h->ts_remain,
+ h->ts_remain > 2 ?
+ *(unsigned short *)h->from_where : 0);
+
+ #ifdef ULE_DEBUG
+ hexdump(iov[0].iov_base, iov[0].iov_len);
+ hexdump(iov[1].iov_base, iov[1].iov_len);
+ hexdump(iov[2].iov_base, iov[2].iov_len);
+
+ if (h->ule_where == h->ule_hist) {
+ hexdump(&h->ule_hist[98*TS_SZ], TS_SZ);
+ hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+ } else if (h->ule_where == &h->ule_hist[TS_SZ]) {
+ hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+ hexdump(h->ule_hist, TS_SZ);
+ } else {
+ hexdump(h->ule_where - TS_SZ - TS_SZ, TS_SZ);
+ hexdump(h->ule_where - TS_SZ, TS_SZ);
+ }
+ h->ule_dump = 1;
+ #endif
+
+ h->dev->stats.rx_errors++;
+ h->dev->stats.rx_crc_errors++;
+ dev_kfree_skb(h->priv->ule_skb);
+
+ return;
+ }
+
+ /* CRC32 verified OK. */
+
+ /* CRC32 was OK, so remove it from skb. */
+ h->priv->ule_skb->tail -= 4;
+ h->priv->ule_skb->len -= 4;
+
+ if (!h->priv->ule_dbit) {
+ if (dvb_net_ule_should_drop(h)) {
+#ifdef ULE_DEBUG
+ netdev_dbg(h->dev,
+ "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
+ h->priv->ule_skb->data, h->dev->dev_addr);
+#endif
+ dev_kfree_skb(h->priv->ule_skb);
+ return;
+ }
+
+ skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
+ ETH_ALEN);
+ skb_pull(h->priv->ule_skb, ETH_ALEN);
+ }
+
+ /* Handle ULE Extension Headers. */
+ if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
+ /* There is an extension header. Handle it accordingly. */
+ int l = handle_ule_extensions(h->priv);
+
+ if (l < 0) {
+ /*
+ * Mandatory extension header unknown or TEST SNDU.
+ * Drop it.
+ */
+
+ // pr_warn("Dropping SNDU, extension headers.\n" );
+ dev_kfree_skb(h->priv->ule_skb);
+ return;
+ }
+ skb_pull(h->priv->ule_skb, l);
+ }
+
+ /*
+ * Construct/assure correct ethernet header.
+ * Note: in bridged mode (h->priv->ule_bridged != 0)
+ * we already have the (original) ethernet
+ * header at the start of the payload (after
+ * optional dest. address and any extension
+ * headers).
+ */
+ if (!h->priv->ule_bridged) {
+ skb_push(h->priv->ule_skb, ETH_HLEN);
+ h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
+ if (!h->priv->ule_dbit) {
+ /*
+ * dest_addr buffer is only valid if
+ * h->priv->ule_dbit == 0
+ */
+ memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+ eth_zero_addr(h->ethh->h_source);
+ } else /* zeroize source and dest */
+ memset(h->ethh, 0, ETH_ALEN * 2);
- /* This includes the CRC32 _and_ dest mac, if !dbit. */
- priv->ule_sndu_remain = priv->ule_sndu_len;
- priv->ule_skb->dev = dev;
- /* Leave space for Ethernet or bridged SNDU header (eth hdr plus one MAC addr). */
- skb_reserve( priv->ule_skb, ETH_HLEN + ETH_ALEN );
+ h->ethh->h_proto = htons(h->priv->ule_sndu_type);
+ }
+ /* else: skb is in correct state; nothing to do. */
+ h->priv->ule_bridged = 0;
+
+ /* Stuff into kernel's protocol stack. */
+ h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
+ h->dev);
+ /*
+ * If D-bit is set (i.e. destination MAC address not present),
+ * receive the packet anyhow.
+ */
+#if 0
+ if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
+ h->priv->ule_skb->pkt_type = PACKET_HOST;
+#endif
+ h->dev->stats.rx_packets++;
+ h->dev->stats.rx_bytes += h->priv->ule_skb->len;
+ netif_rx(h->priv->ule_skb);
+}
+
+static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
+{
+ int ret;
+ struct dvb_net_ule_handle h = {
+ .dev = dev,
+ .priv = netdev_priv(dev),
+ .buf = buf,
+ .buf_len = buf_len,
+ .skipped = 0L,
+ .ts = NULL,
+ .ts_end = NULL,
+ .from_where = NULL,
+ .ts_remain = 0,
+ .how_much = 0,
+ .new_ts = 1,
+ .ethh = NULL,
+ .error = false,
+#ifdef ULE_DEBUG
+ .ule_where = ule_hist,
+#endif
+ };
+
+ /*
+ * For all TS cells in current buffer.
+ * Apparently, we are called for every single TS cell.
+ */
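+ /*
+ * Each helper below returns 0 to keep processing the current TS
+ * cell, or 1 to make the caller continue with the loop after the
+ * helper has updated the parsing state; dvb_net_ule_new_payload()
+ * returns a negative value on allocation failure, which aborts
+ * the whole loop.
+ */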
+ for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
+ h.ts < h.ts_end; /* no incr. */) {
+ if (h.new_ts) {
+ /* We are about to process a new TS cell. */
+ if (dvb_net_ule_new_ts_cell(&h))
+ continue;
+ }
+
+ /* Synchronize on PUSI, if required. */
+ if (h.priv->need_pusi) {
+ if (dvb_net_ule_ts_pusi(&h))
+ continue;
+ }
+
+ if (h.new_ts) {
+ if (dvb_net_ule_new_ts(&h))
+ continue;
+ }
+
+ /* Check if new payload needs to be started. */
+ if (h.priv->ule_skb == NULL) {
+ ret = dvb_net_ule_new_payload(&h);
+ if (ret < 0)
+ return;
+ if (ret)
+ continue;
}
/* Copy data into our current skb. */
- how_much = min(priv->ule_sndu_remain, (int)ts_remain);
- memcpy(skb_put(priv->ule_skb, how_much), from_where, how_much);
- priv->ule_sndu_remain -= how_much;
- ts_remain -= how_much;
- from_where += how_much;
+ h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
+ memcpy(skb_put(h.priv->ule_skb, h.how_much),
+ h.from_where, h.how_much);
+ h.priv->ule_sndu_remain -= h.how_much;
+ h.ts_remain -= h.how_much;
+ h.from_where += h.how_much;
/* Check for complete payload. */
- if (priv->ule_sndu_remain <= 0) {
+ if (h.priv->ule_sndu_remain <= 0) {
/* Check CRC32, we've got it in our skb already. */
- __be16 ulen = htons(priv->ule_sndu_len);
- __be16 utype = htons(priv->ule_sndu_type);
+ __be16 ulen = htons(h.priv->ule_sndu_len);
+ __be16 utype = htons(h.priv->ule_sndu_type);
const u8 *tail;
struct kvec iov[3] = {
{ &ulen, sizeof ulen },
{ &utype, sizeof utype },
- { priv->ule_skb->data, priv->ule_skb->len - 4 }
+ { h.priv->ule_skb->data,
+ h.priv->ule_skb->len - 4 }
};
u32 ule_crc = ~0L, expected_crc;
- if (priv->ule_dbit) {
+ if (h.priv->ule_dbit) {
/* Set D-bit for CRC32 verification,
* if it was set originally. */
ulen |= htons(0x8000);
}
ule_crc = iov_crc32(ule_crc, iov, 3);
- tail = skb_tail_pointer(priv->ule_skb);
+ tail = skb_tail_pointer(h.priv->ule_skb);
expected_crc = *(tail - 4) << 24 |
*(tail - 3) << 16 |
*(tail - 2) << 8 |
*(tail - 1);
- if (ule_crc != expected_crc) {
- printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
- priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
-#ifdef ULE_DEBUG
- hexdump( iov[0].iov_base, iov[0].iov_len );
- hexdump( iov[1].iov_base, iov[1].iov_len );
- hexdump( iov[2].iov_base, iov[2].iov_len );
-
- if (ule_where == ule_hist) {
- hexdump( &ule_hist[98*TS_SZ], TS_SZ );
- hexdump( &ule_hist[99*TS_SZ], TS_SZ );
- } else if (ule_where == &ule_hist[TS_SZ]) {
- hexdump( &ule_hist[99*TS_SZ], TS_SZ );
- hexdump( ule_hist, TS_SZ );
- } else {
- hexdump( ule_where - TS_SZ - TS_SZ, TS_SZ );
- hexdump( ule_where - TS_SZ, TS_SZ );
- }
- ule_dump = 1;
-#endif
+ dvb_net_ule_check_crc(&h, ule_crc, expected_crc);
- dev->stats.rx_errors++;
- dev->stats.rx_crc_errors++;
- dev_kfree_skb(priv->ule_skb);
- } else {
- /* CRC32 verified OK. */
- u8 dest_addr[ETH_ALEN];
- static const u8 bc_addr[ETH_ALEN] =
- { [ 0 ... ETH_ALEN-1] = 0xff };
-
- /* CRC32 was OK. Remove it from skb. */
- priv->ule_skb->tail -= 4;
- priv->ule_skb->len -= 4;
-
- if (!priv->ule_dbit) {
- /*
- * The destination MAC address is the
- * next data in the skb. It comes
- * before any extension headers.
- *
- * Check if the payload of this SNDU
- * should be passed up the stack.
- */
- register int drop = 0;
- if (priv->rx_mode != RX_MODE_PROMISC) {
- if (priv->ule_skb->data[0] & 0x01) {
- /* multicast or broadcast */
- if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
- /* multicast */
- if (priv->rx_mode == RX_MODE_MULTI) {
- int i;
- for(i = 0; i < priv->multi_num &&
- !ether_addr_equal(priv->ule_skb->data,
- priv->multi_macs[i]); i++)
- ;
- if (i == priv->multi_num)
- drop = 1;
- } else if (priv->rx_mode != RX_MODE_ALL_MULTI)
- drop = 1; /* no broadcast; */
- /* else: all multicast mode: accept all multicast packets */
- }
- /* else: broadcast */
- }
- else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
- drop = 1;
- /* else: destination address matches the MAC address of our receiver device */
- }
- /* else: promiscuous mode; pass everything up the stack */
-
- if (drop) {
-#ifdef ULE_DEBUG
- netdev_dbg(dev, "Dropping SNDU: MAC destination address does not match: dest addr: %pM, dev addr: %pM\n",
- priv->ule_skb->data, dev->dev_addr);
-#endif
- dev_kfree_skb(priv->ule_skb);
- goto sndu_done;
- }
- else
- {
- skb_copy_from_linear_data(priv->ule_skb,
- dest_addr,
- ETH_ALEN);
- skb_pull(priv->ule_skb, ETH_ALEN);
- }
- }
-
- /* Handle ULE Extension Headers. */
- if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
- /* There is an extension header. Handle it accordingly. */
- int l = handle_ule_extensions(priv);
- if (l < 0) {
- /* Mandatory extension header unknown or TEST SNDU. Drop it. */
- // printk( KERN_WARNING "Dropping SNDU, extension headers.\n" );
- dev_kfree_skb(priv->ule_skb);
- goto sndu_done;
- }
- skb_pull(priv->ule_skb, l);
- }
-
- /*
- * Construct/assure correct ethernet header.
- * Note: in bridged mode (priv->ule_bridged !=
- * 0) we already have the (original) ethernet
- * header at the start of the payload (after
- * optional dest. address and any extension
- * headers).
- */
-
- if (!priv->ule_bridged) {
- skb_push(priv->ule_skb, ETH_HLEN);
- ethh = (struct ethhdr *)priv->ule_skb->data;
- if (!priv->ule_dbit) {
- /* dest_addr buffer is only valid if priv->ule_dbit == 0 */
- memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
- eth_zero_addr(ethh->h_source);
- }
- else /* zeroize source and dest */
- memset( ethh, 0, ETH_ALEN*2 );
-
- ethh->h_proto = htons(priv->ule_sndu_type);
- }
- /* else: skb is in correct state; nothing to do. */
- priv->ule_bridged = 0;
-
- /* Stuff into kernel's protocol stack. */
- priv->ule_skb->protocol = dvb_net_eth_type_trans(priv->ule_skb, dev);
- /* If D-bit is set (i.e. destination MAC address not present),
- * receive the packet anyhow. */
- /* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
- priv->ule_skb->pkt_type = PACKET_HOST; */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += priv->ule_skb->len;
- netif_rx(priv->ule_skb);
- }
- sndu_done:
/* Prepare for next SNDU. */
- reset_ule(priv);
+ reset_ule(h.priv);
}
/* More data in current TS (look at the bytes following the CRC32)? */
- if (ts_remain >= 2 && *((unsigned short *)from_where) != 0xFFFF) {
+ if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
/* Next ULE SNDU starts right there. */
- new_ts = 0;
- priv->ule_skb = NULL;
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_len = 0;
- // printk(KERN_WARNING "More data in current TS: [%#x %#x %#x %#x]\n",
- // *(from_where + 0), *(from_where + 1),
- // *(from_where + 2), *(from_where + 3));
- // printk(KERN_WARNING "ts @ %p, stopped @ %p:\n", ts, from_where + 0);
- // hexdump(ts, 188);
+ h.new_ts = 0;
+ h.priv->ule_skb = NULL;
+ h.priv->ule_sndu_type_1 = 0;
+ h.priv->ule_sndu_len = 0;
+ // pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
+ // *(h.from_where + 0), *(h.from_where + 1),
+ // *(h.from_where + 2), *(h.from_where + 3));
+ // pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
+ // hexdump(h.ts, 188);
} else {
- new_ts = 1;
- ts += TS_SZ;
- priv->ts_count++;
- if (priv->ule_skb == NULL) {
- priv->need_pusi = 1;
- priv->ule_sndu_type_1 = 0;
- priv->ule_sndu_len = 0;
+ h.new_ts = 1;
+ h.ts += TS_SZ;
+ h.priv->ts_count++;
+ if (h.priv->ule_skb == NULL) {
+ h.priv->need_pusi = 1;
+ h.priv->ule_sndu_type_1 = 0;
+ h.priv->ule_sndu_len = 0;
}
}
} /* for all available TS cells */
@@ -766,10 +906,10 @@ static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
struct net_device *dev = feed->priv;
if (buffer2)
- printk(KERN_WARNING "buffer2 not NULL: %p.\n", buffer2);
+ pr_warn("buffer2 not NULL: %p.\n", buffer2);
if (buffer1_len > 32768)
- printk(KERN_WARNING "length > 32k: %zu.\n", buffer1_len);
- /* printk("TS callback: %u bytes, %u TS cells @ %p.\n",
+ pr_warn("length > 32k: %zu.\n", buffer1_len);
+ /* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
buffer1_len, buffer1_len / TS_SZ, buffer1); */
dvb_net_ule(dev, buffer1, buffer1_len);
return 0;
@@ -786,7 +926,7 @@ static void dvb_net_sec(struct net_device *dev,
/* note: pkt_len includes a 32bit checksum */
if (pkt_len < 16) {
- printk("%s: IP/MPE packet length = %d too small.\n",
+ pr_warn("%s: IP/MPE packet length = %d too small.\n",
dev->name, pkt_len);
stats->rx_errors++;
stats->rx_length_errors++;
@@ -824,7 +964,7 @@ static void dvb_net_sec(struct net_device *dev,
* 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
*/
if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
- //printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ //pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
stats->rx_dropped++;
return;
}
@@ -903,7 +1043,7 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
*secfilter=NULL;
ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
if (ret<0) {
- printk("%s: could not get filter\n", dev->name);
+ pr_err("%s: could not get filter\n", dev->name);
return ret;
}
@@ -944,7 +1084,7 @@ static int dvb_net_feed_start(struct net_device *dev)
netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
mutex_lock(&priv->mutex);
if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
- printk("%s: BUG %d\n", __func__, __LINE__);
+ pr_err("%s: BUG %d\n", __func__, __LINE__);
priv->secfeed=NULL;
priv->secfilter=NULL;
@@ -955,14 +1095,15 @@ static int dvb_net_feed_start(struct net_device *dev)
ret=demux->allocate_section_feed(demux, &priv->secfeed,
dvb_net_sec_callback);
if (ret<0) {
- printk("%s: could not allocate section feed\n", dev->name);
+ pr_err("%s: could not allocate section feed\n",
+ dev->name);
goto error;
}
- ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 1);
+ ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);
if (ret<0) {
- printk("%s: could not set section feed\n", dev->name);
+ pr_err("%s: could not set section feed\n", dev->name);
priv->demux->release_section_feed(priv->demux, priv->secfeed);
priv->secfeed=NULL;
goto error;
@@ -1003,7 +1144,7 @@ static int dvb_net_feed_start(struct net_device *dev)
netdev_dbg(dev, "alloc tsfeed\n");
ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
if (ret < 0) {
- printk("%s: could not allocate ts feed\n", dev->name);
+ pr_err("%s: could not allocate ts feed\n", dev->name);
goto error;
}
@@ -1013,12 +1154,11 @@ static int dvb_net_feed_start(struct net_device *dev)
priv->pid, /* pid */
TS_PACKET, /* type */
DMX_PES_OTHER, /* pes type */
- 32768, /* circular buffer size */
timeout /* timeout */
);
if (ret < 0) {
- printk("%s: could not set ts feed\n", dev->name);
+ pr_err("%s: could not set ts feed\n", dev->name);
priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
priv->tsfeed = NULL;
goto error;
@@ -1067,7 +1207,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
priv->demux->release_section_feed(priv->demux, priv->secfeed);
priv->secfeed = NULL;
} else
- printk("%s: no feed to stop\n", dev->name);
+ pr_err("%s: no feed to stop\n", dev->name);
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
if (priv->tsfeed) {
if (priv->tsfeed->is_filtering) {
@@ -1078,7 +1218,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
priv->tsfeed = NULL;
}
else
- printk("%s: no ts feed to stop\n", dev->name);
+ pr_err("%s: no ts feed to stop\n", dev->name);
} else
ret = -EINVAL;
mutex_unlock(&priv->mutex);
@@ -1279,7 +1419,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
free_netdev(net);
return result;
}
- printk("dvb_net: created network interface %s\n", net->name);
+ pr_info("created network interface %s\n", net->name);
return if_num;
}
@@ -1298,7 +1438,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
dvb_net_stop(net);
flush_work(&priv->set_multicast_list_wq);
flush_work(&priv->restart_net_feed_wq);
- printk("dvb_net: removed network interface %s\n", net->name);
+ pr_info("removed network interface %s\n", net->name);
unregister_netdev(net);
dvbnet->state[num]=0;
dvbnet->device[num] = NULL;
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 7df7fb3738a0..5c4b5a1f604f 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "dvb_ringbuffer.h"
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 75a3f4b57fd4..38c844667789 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) "dvbdev: " fmt
+
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -43,7 +45,11 @@ static int dvbdev_debug;
module_param(dvbdev_debug, int, 0644);
MODULE_PARM_DESC(dvbdev_debug, "Turn on/off device debugging (default:off).");
-#define dprintk if (dvbdev_debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (dvbdev_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
@@ -354,7 +360,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
if (ret)
return ret;
- printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
+ pr_info("%s: media entity '%s' registered.\n",
__func__, dvbdev->entity->name);
return 0;
@@ -438,7 +444,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if ((id = dvbdev_get_free_id (adap, type)) < 0){
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
- printk(KERN_ERR "%s: couldn't find free device id\n", __func__);
+ pr_err("%s: couldn't find free device id\n", __func__);
return -ENFILE;
}
@@ -493,8 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
if (ret) {
- printk(KERN_ERR
- "%s: dvb_register_media_device failed to create the mediagraph\n",
+ pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
__func__);
dvb_media_device_free(dvbdev);
@@ -511,11 +516,11 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
- printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n",
+ pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
return PTR_ERR(clsdev);
}
- dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+ dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
return 0;
@@ -523,7 +528,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
EXPORT_SYMBOL(dvb_register_device);
-void dvb_unregister_device(struct dvb_device *dvbdev)
+void dvb_remove_device(struct dvb_device *dvbdev)
{
if (!dvbdev)
return;
@@ -537,9 +542,26 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
list_del (&dvbdev->list_head);
+}
+EXPORT_SYMBOL(dvb_remove_device);
+
+
+void dvb_free_device(struct dvb_device *dvbdev)
+{
+ if (!dvbdev)
+ return;
+
kfree (dvbdev->fops);
kfree (dvbdev);
}
+EXPORT_SYMBOL(dvb_free_device);
+
+
+void dvb_unregister_device(struct dvb_device *dvbdev)
+{
+ dvb_remove_device(dvbdev);
+ dvb_free_device(dvbdev);
+}
EXPORT_SYMBOL(dvb_unregister_device);
@@ -808,7 +830,7 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
memset (adap, 0, sizeof(struct dvb_adapter));
INIT_LIST_HEAD (&adap->device_list);
- printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
+ pr_info("DVB: registering new adapter (%s)\n", name);
adap->num = num;
adap->name = name;
@@ -926,13 +948,13 @@ static int __init init_dvbdev(void)
dev_t dev = MKDEV(DVB_MAJOR, 0);
if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
- printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
+ pr_err("dvb-core: unable to get major %d\n", DVB_MAJOR);
return retval;
}
cdev_init(&dvb_device_cdev, &dvb_device_fops);
if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
- printk(KERN_ERR "dvb-core: unable register character device\n");
+ pr_err("dvb-core: unable register character device\n");
goto error;
}
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 4aff7bd3dea8..8c0a7b51555e 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -34,7 +34,7 @@
#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
#define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
#else
- #define DVB_MAX_ADAPTERS 8
+ #define DVB_MAX_ADAPTERS 16
#endif
#define DVB_UNSET (-1)
@@ -212,8 +212,31 @@ int dvb_register_device(struct dvb_adapter *adap,
int demux_sink_pads);
/**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+ * This does not free memory. To do that, call dvb_free_device().
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_remove_device(struct dvb_device *dvbdev);
+
+/**
+ * dvb_free_device - Free memory occupied by a DVB device.
+ *
+ * Call dvb_remove_device() before calling this function.
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_free_device(struct dvb_device *dvbdev);
+
+/**
* dvb_unregister_device - Unregisters a DVB device
*
+ * This is a combination of dvb_remove_device() and dvb_free_device().
+ * Using this function is usually a mistake, and is often an indicator
+ * for a use-after-free bug (when a userspace process keeps a file
+ * handle to a detached device).
+ *
* @dvbdev: pointer to struct dvb_device
*/
void dvb_unregister_device(struct dvb_device *dvbdev);
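
A minimal sketch of how the split above can be used when open file handles may outlive the device, modeled on what dvb_frontend_free() does elsewhere in this series (struct example_dev and its refcount are hypothetical, not part of this patch):

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include "dvbdev.h"

	struct example_dev {
		struct kref refcount;
		struct dvb_device *dvbdev;
	};

	static void example_dev_free(struct kref *ref)
	{
		struct example_dev *xdev = container_of(ref, struct example_dev, refcount);

		dvb_free_device(xdev->dvbdev);	/* last user gone: now free */
		kfree(xdev);
	}

	static void example_dev_remove(struct example_dev *xdev)
	{
		dvb_remove_device(xdev->dvbdev);	/* stop new opens immediately */
		kref_put(&xdev->refcount, example_dev_free);
	}
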
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index b71b747ee0ba..c841fa1770be 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -642,7 +642,7 @@ config DVB_S5H1409
to support this frontend.
config DVB_AU8522
- depends on I2C
+ depends on DVB_CORE && I2C
tristate
config DVB_AU8522_DTV
@@ -656,7 +656,7 @@ config DVB_AU8522_DTV
config DVB_AU8522_V4L
tristate "Auvitek AU8522 based ATV demod"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_V4L2 && DVB_CORE && I2C
select DVB_AU8522
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -722,7 +722,7 @@ config DVB_PLL
config DVB_TUNER_DIB0070
tristate "DiBcom DiB0070 silicon base-band tuner"
- depends on I2C
+ depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A driver for the silicon baseband tuner DiB0070 from DiBcom.
@@ -731,7 +731,7 @@ config DVB_TUNER_DIB0070
config DVB_TUNER_DIB0090
tristate "DiBcom DiB0090 silicon base-band tuner"
- depends on I2C
+ depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A driver for the silicon baseband tuner DiB0090 from DiBcom.
@@ -879,5 +879,6 @@ comment "Tools to develop new frontends"
config DVB_DUMMY_FE
tristate "Dummy frontend driver"
+ depends on DVB_CORE
default n
endmenu
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 8bcde336ffd7..c6cb3bbc912a 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1351,7 +1351,7 @@ static void af9013_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops af9013_ops;
+static const struct dvb_frontend_ops af9013_ops;
static int af9013_download_firmware(struct af9013_state *state)
{
@@ -1516,7 +1516,7 @@ err:
}
EXPORT_SYMBOL(af9013_attach);
-static struct dvb_frontend_ops af9013_ops = {
+static const struct dvb_frontend_ops af9013_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9013",
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 9a8157a5f49d..f8818028752e 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1198,7 +1198,7 @@ err:
return ret;
}
-static struct dvb_frontend_ops af9033_ops = {
+static const struct dvb_frontend_ops af9033_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9033 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 9412fcd1bddb..98d575f2744c 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -415,7 +415,7 @@ static void as102_fe_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops as102_fe_ops = {
+static const struct dvb_frontend_ops as102_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Abilis AS102 DVB-T",
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index ad304eed656d..0ee0df53b91b 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -254,14 +254,13 @@ static int ascot2e_init(struct dvb_frontend *fe)
return ascot2e_leave_power_save(priv);
}
-static int ascot2e_release(struct dvb_frontend *fe)
+static void ascot2e_release(struct dvb_frontend *fe)
{
struct ascot2e_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int ascot2e_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 47248b868e38..07ce05578278 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -428,7 +428,7 @@ static int atbm8830_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
return atbm8830_write_reg(priv, REG_I2C_GATE, enable ? 1 : 0);
}
-static struct dvb_frontend_ops atbm8830_ops = {
+static const struct dvb_frontend_ops atbm8830_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "AltoBeam ATBM8830/8831 DMB-TH",
diff --git a/drivers/media/dvb-frontends/au8522_common.c b/drivers/media/dvb-frontends/au8522_common.c
index f135126bc373..cf4ac240a01f 100644
--- a/drivers/media/dvb-frontends/au8522_common.c
+++ b/drivers/media/dvb-frontends/au8522_common.c
@@ -50,8 +50,8 @@ int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index e676b9461a59..7ed326e43fc4 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -834,7 +834,7 @@ static int au8522_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops au8522_ops;
+static const struct dvb_frontend_ops au8522_ops;
static void au8522_release(struct dvb_frontend *fe)
@@ -894,7 +894,7 @@ error:
}
EXPORT_SYMBOL(au8522_attach);
-static struct dvb_frontend_ops au8522_ops = {
+static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index bb698839e477..617c5e29f919 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -788,7 +788,7 @@ static int bcm3510_init(struct dvb_frontend* fe)
}
-static struct dvb_frontend_ops bcm3510_ops;
+static const struct dvb_frontend_ops bcm3510_ops;
struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
struct i2c_adapter *i2c)
@@ -834,7 +834,7 @@ error:
}
EXPORT_SYMBOL(bcm3510_attach);
-static struct dvb_frontend_ops bcm3510_ops = {
+static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Broadcom BCM3510 VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 5cad925609e0..2b629e23ceeb 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -380,7 +380,7 @@ static void cx22700_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops cx22700_ops;
+static const struct dvb_frontend_ops cx22700_ops;
struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct i2c_adapter* i2c)
@@ -408,7 +408,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops cx22700_ops = {
+static const struct dvb_frontend_ops cx22700_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6cb81ec12847..cf1bc99d1f32 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -120,8 +120,8 @@ static int cx24110_writereg (struct cx24110_state* state, int reg, int data)
int err;
if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
- dprintk ("%s: writereg error (err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ dprintk("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -592,7 +592,7 @@ static void cx24110_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24110_ops;
+static const struct dvb_frontend_ops cx24110_ops;
struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
struct i2c_adapter* i2c)
@@ -625,7 +625,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops cx24110_ops = {
+static const struct dvb_frontend_ops cx24110_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24110 DVB-S",
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 3883c3b31aef..db44ebb7c561 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -108,8 +108,8 @@ static int cx24113_writereg(struct cx24113_state *state, int reg, int data)
.flags = 0, .buf = buf, .len = 2 };
int err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return err;
}
@@ -527,13 +527,12 @@ static int cx24113_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int cx24113_release(struct dvb_frontend *fe)
+static void cx24113_release(struct dvb_frontend *fe)
{
struct cx24113_state *state = fe->tuner_priv;
dprintk("\n");
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops cx24113_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 8814f36d53fb..e105532bfba8 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -209,8 +209,8 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -498,8 +498,8 @@ static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
__func__);
if (ret) {
- printk(KERN_ERR "%s: No firmware uploaded "
- "(timeout or file not found?)\n", __func__);
+ printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -1116,7 +1116,7 @@ static void cx24116_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24116_ops;
+static const struct dvb_frontend_ops cx24116_ops;
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
struct i2c_adapter *i2c)
@@ -1467,7 +1467,7 @@ static int cx24116_get_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
-static struct dvb_frontend_ops cx24116_ops = {
+static const struct dvb_frontend_ops cx24116_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24116/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index a3f7eb4e609d..d37cb7762bd6 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -474,8 +474,8 @@ static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
"%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
dev_err(&state->priv->i2c->dev,
- "%s: No firmware uploaded "
- "(timeout or file not found?)\n", __func__);
+ "%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -1164,7 +1164,7 @@ static void cx24117_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx24117_ops;
+static const struct dvb_frontend_ops cx24117_ops;
struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
struct i2c_adapter *i2c)
@@ -1618,7 +1618,7 @@ static int cx24117_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops cx24117_ops = {
+static const struct dvb_frontend_ops cx24117_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24117/CX24132",
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 066ee387bf25..7f11dcc94d85 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -267,7 +267,7 @@ out:
return ret;
}
-static struct dvb_frontend_ops cx24120_ops;
+static const struct dvb_frontend_ops cx24120_ops;
struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
struct i2c_adapter *i2c)
@@ -1154,8 +1154,7 @@ static int cx24120_set_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev,
"delivery system(%d) not supported\n",
c->delivery_system);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
state->dnxt.delsys = c->delivery_system;
@@ -1552,7 +1551,7 @@ static int cx24120_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static struct dvb_frontend_ops cx24120_ops = {
+static const struct dvb_frontend_ops cx24120_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24120/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 113b0949408a..8aed8cc9f93d 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -255,8 +255,8 @@ static int cx24123_i2c_writereg(struct cx24123_state *state,
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk("%s: writereg error(err == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, err, reg, data);
+ printk("%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, err, reg, data);
return err;
}
@@ -1049,7 +1049,7 @@ struct i2c_adapter *
}
EXPORT_SYMBOL(cx24123_get_tuner_i2c_adapter);
-static struct dvb_frontend_ops cx24123_ops;
+static const struct dvb_frontend_ops cx24123_ops;
struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
struct i2c_adapter *i2c)
@@ -1111,7 +1111,7 @@ error:
}
EXPORT_SYMBOL(cx24123_attach);
-static struct dvb_frontend_ops cx24123_ops = {
+static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24123/CX24109",
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c508f65..614bfb3740f1 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3719,7 +3719,7 @@ static int cxd2841er_init_tc(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
static struct dvb_frontend_ops cxd2841er_t_c_ops;
static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
@@ -3801,7 +3801,7 @@ struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
}
EXPORT_SYMBOL(cxd2841er_attach_t_c);
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Sony CXD2841ER DVB-S/S2 demodulator",
@@ -3829,7 +3829,7 @@ static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.tune = cxd2841er_tune_s
};
-static struct dvb_frontend_ops cxd2841er_t_c_ops = {
+static struct dvb_frontend_ops cxd2841er_t_c_ops = {
.delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
.info = {
.name = "", /* will set in attach function */
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index ee7d66997ccd..befc8172159d 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -24,6 +24,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { \
- if (debug) { \
- printk(KERN_DEBUG "DiB0070: "); \
- printk(args); \
- printk("\n"); \
- } \
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
#define DIB0070_P1D 0x00
@@ -87,7 +87,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -104,7 +104,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
- printk(KERN_WARNING "DiB0070 I2C read failed\n");
+ pr_warn("DiB0070 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -119,7 +119,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
state->i2c_write_buffer[0] = reg;
@@ -133,7 +133,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
state->msg[0].len = 3;
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0070 I2C write failed\n");
+ pr_warn("DiB0070 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -205,7 +205,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
adc = dib0070_read_reg(state, 0x19);
- dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
+ dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV\n", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
if (adc >= 400) {
adc -= 400;
@@ -216,7 +216,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
}
if (adc < state->adc_diff) {
- dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)", state->captrim, adc, state->adc_diff);
+ dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)\n", state->captrim, adc, state->adc_diff);
state->adc_diff = adc;
state->fcaptrim = state->captrim;
}
@@ -241,7 +241,7 @@ static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf
struct dib0070_state *state = fe->tuner_priv;
u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
- dprintk("CTRL_LO5: 0x%x", lo5);
+ dprintk("CTRL_LO5: 0x%x\n", lo5);
return dib0070_write_reg(state, 0x15, lo5);
}
@@ -256,7 +256,7 @@ void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
dib0070_write_reg(state, 0x1b, 0x4112);
if (state->cfg->vga_filter != 0) {
dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
- dprintk("vga filter register is set to %x", state->cfg->vga_filter);
+ dprintk("vga filter register is set to %x\n", state->cfg->vga_filter);
} else
dib0070_write_reg(state, 0x1a, 0x0009);
}
@@ -380,7 +380,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
}
if (*tune_state == CT_TUNER_START) {
- dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
+ dprintk("Tuning for Band: %hd (%d kHz)\n", band, freq);
if (state->current_rf != freq) {
u8 REFDIV;
u32 FBDiv, Rest, FREF, VCOF_kHz;
@@ -458,12 +458,12 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
dib0070_write_reg(state, 0x20,
0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
- dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
- dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
- dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
- dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
- dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
- dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
+ dprintk("REFDIV: %hd, FREF: %d\n", REFDIV, FREF);
+ dprintk("FBDIV: %d, Rest: %d\n", FBDiv, Rest);
+ dprintk("Num: %hd, Den: %hd, SD: %hd\n", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
+ dprintk("HFDIV code: %hd\n", state->current_tune_table_index->hfdiv);
+ dprintk("VCO = %hd\n", state->current_tune_table_index->vco_band);
+ dprintk("VCOF: ((%hd*%d) << 1))\n", state->current_tune_table_index->vco_multi, freq);
*tune_state = CT_TUNER_STEP_0;
} else { /* we are already tuned to this frequency - the configuration is correct */
@@ -625,7 +625,7 @@ static void dib0070_wbd_offset_calibration(struct dib0070_state *state)
u8 gain;
for (gain = 6; gain < 8; gain++) {
state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
- dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain-6]);
+ dprintk("Gain: %d, WBDOffset (3.3V) = %hd\n", gain, state->wbd_offset_3_3[gain-6]);
}
}
@@ -665,10 +665,10 @@ static int dib0070_reset(struct dvb_frontend *fe)
state->revision = DIB0070S_P1A;
/* P1F or not */
- dprintk("Revision: %x", state->revision);
+ dprintk("Revision: %x\n", state->revision);
if (state->revision == DIB0070_P1D) {
- dprintk("Error: this driver is not to be used meant for P1D or earlier");
+ dprintk("Error: this driver is not to be used meant for P1D or earlier\n");
return -EINVAL;
}
@@ -722,11 +722,10 @@ static int dib0070_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int dib0070_release(struct dvb_frontend *fe)
+static void dib0070_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops dib0070_ops = {
@@ -761,7 +760,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
if (dib0070_reset(fe) != 0)
goto free_mem;
- printk(KERN_INFO "DiB0070: successfully identified\n");
+ pr_info("DiB0070: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0070_ops, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = state;
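The dib0070 hunk above (and the dib0090 one below) replaces the old three-printk dprintk macro with one that prefixes the module name via pr_fmt() and the calling function, and that expects callers to supply their own trailing newline. A rough userspace analogue of that macro shape, assuming a GNU-C compiler (gcc/clang) for the named variadic macro and using printf and a made-up "dib_demo" prefix instead of printk/KBUILD_MODNAME:

    #include <stdio.h>

    static int debug = 1;                   /* stand-in for the module parameter */

    /* Rough userspace analogue of KBUILD_MODNAME-based pr_fmt(). */
    #define pr_fmt(fmt) "dib_demo: " fmt

    #define dprintk(fmt, arg...) do {                                   \
            if (debug)                                                  \
                    printf(pr_fmt("%s: " fmt), __func__, ##arg);        \
    } while (0)

    int main(void)
    {
            /* callers now carry the trailing newline themselves */
            dprintk("could not acquire lock\n");
            dprintk("Revision: %x\n", 0x4001);
            return 0;
    }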
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 14c403254fe0..fd3b33296b15 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -24,6 +24,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { \
- if (debug) { \
- printk(KERN_DEBUG "DiB0090: "); \
- printk(args); \
- printk("\n"); \
- } \
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
#define CONFIG_SYS_DVBT
@@ -218,7 +218,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -235,7 +235,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
- printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ pr_warn("DiB0090 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -250,7 +250,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -265,7 +265,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
state->msg[0].len = 3;
if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ pr_warn("DiB0090 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -279,7 +279,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -291,7 +291,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
state->msg.buf = state->i2c_read_buffer;
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ pr_warn("DiB0090 I2C read failed\n");
ret = 0;
} else
ret = (state->i2c_read_buffer[0] << 8)
@@ -306,7 +306,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -319,7 +319,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
state->msg.buf = state->i2c_write_buffer;
state->msg.len = 2;
if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
- printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ pr_warn("DiB0090 I2C write failed\n");
ret = -EREMOTEIO;
} else
ret = 0;
@@ -351,7 +351,7 @@ static int dib0090_identify(struct dvb_frontend *fe)
identity->p1g = 0;
identity->in_soc = 0;
- dprintk("Tuner identification (Version = 0x%04x)", v);
+ dprintk("Tuner identification (Version = 0x%04x)\n", v);
/* without PLL lock info */
v &= ~KROSUS_PLL_LOCKED;
@@ -366,19 +366,19 @@ static int dib0090_identify(struct dvb_frontend *fe)
identity->in_soc = 1;
switch (identity->version) {
case SOC_8090_P1G_11R1:
- dprintk("SOC 8090 P1-G11R1 Has been detected");
+ dprintk("SOC 8090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_8090_P1G_21R1:
- dprintk("SOC 8090 P1-G21R1 Has been detected");
+ dprintk("SOC 8090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_11R1:
- dprintk("SOC 7090 P1-G11R1 Has been detected");
+ dprintk("SOC 7090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_21R1:
- dprintk("SOC 7090 P1-G21R1 Has been detected");
+ dprintk("SOC 7090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
default:
@@ -387,16 +387,16 @@ static int dib0090_identify(struct dvb_frontend *fe)
} else {
switch ((identity->version >> 5) & 0x7) {
case MP001:
- dprintk("MP001 : 9090/8096");
+ dprintk("MP001 : 9090/8096\n");
break;
case MP005:
- dprintk("MP005 : Single Sband");
+ dprintk("MP005 : Single Sband\n");
break;
case MP008:
- dprintk("MP008 : diversity VHF-UHF-LBAND");
+ dprintk("MP008 : diversity VHF-UHF-LBAND\n");
break;
case MP009:
- dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
break;
default:
goto identification_error;
@@ -404,21 +404,21 @@ static int dib0090_identify(struct dvb_frontend *fe)
switch (identity->version & 0x1f) {
case P1G_21R2:
- dprintk("P1G_21R2 detected");
+ dprintk("P1G_21R2 detected\n");
identity->p1g = 1;
break;
case P1G:
- dprintk("P1G detected");
+ dprintk("P1G detected\n");
identity->p1g = 1;
break;
case P1D_E_F:
- dprintk("P1D/E/F detected");
+ dprintk("P1D/E/F detected\n");
break;
case P1C:
- dprintk("P1C detected");
+ dprintk("P1C detected\n");
break;
case P1A_B:
- dprintk("P1-A/B detected: driver is deactivated - not available");
+ dprintk("P1-A/B detected: driver is deactivated - not available\n");
goto identification_error;
break;
default:
@@ -441,7 +441,7 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
identity->p1g = 0;
identity->in_soc = 0;
- dprintk("FE: Tuner identification (Version = 0x%04x)", v);
+ dprintk("FE: Tuner identification (Version = 0x%04x)\n", v);
/* without PLL lock info */
v &= ~KROSUS_PLL_LOCKED;
@@ -456,19 +456,19 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
identity->in_soc = 1;
switch (identity->version) {
case SOC_8090_P1G_11R1:
- dprintk("SOC 8090 P1-G11R1 Has been detected");
+ dprintk("SOC 8090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_8090_P1G_21R1:
- dprintk("SOC 8090 P1-G21R1 Has been detected");
+ dprintk("SOC 8090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_11R1:
- dprintk("SOC 7090 P1-G11R1 Has been detected");
+ dprintk("SOC 7090 P1-G11R1 Has been detected\n");
identity->p1g = 1;
break;
case SOC_7090_P1G_21R1:
- dprintk("SOC 7090 P1-G21R1 Has been detected");
+ dprintk("SOC 7090 P1-G21R1 Has been detected\n");
identity->p1g = 1;
break;
default:
@@ -477,16 +477,16 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
} else {
switch ((identity->version >> 5) & 0x7) {
case MP001:
- dprintk("MP001 : 9090/8096");
+ dprintk("MP001 : 9090/8096\n");
break;
case MP005:
- dprintk("MP005 : Single Sband");
+ dprintk("MP005 : Single Sband\n");
break;
case MP008:
- dprintk("MP008 : diversity VHF-UHF-LBAND");
+ dprintk("MP008 : diversity VHF-UHF-LBAND\n");
break;
case MP009:
- dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+ dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
break;
default:
goto identification_error;
@@ -494,21 +494,21 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
switch (identity->version & 0x1f) {
case P1G_21R2:
- dprintk("P1G_21R2 detected");
+ dprintk("P1G_21R2 detected\n");
identity->p1g = 1;
break;
case P1G:
- dprintk("P1G detected");
+ dprintk("P1G detected\n");
identity->p1g = 1;
break;
case P1D_E_F:
- dprintk("P1D/E/F detected");
+ dprintk("P1D/E/F detected\n");
break;
case P1C:
- dprintk("P1C detected");
+ dprintk("P1C detected\n");
break;
case P1A_B:
- dprintk("P1-A/B detected: driver is deactivated - not available");
+ dprintk("P1-A/B detected: driver is deactivated - not available\n");
goto identification_error;
break;
default:
@@ -574,7 +574,7 @@ static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_
} while (--i);
if (i == 0) {
- dprintk("Pll: Unable to lock Pll");
+ dprintk("Pll: Unable to lock Pll\n");
return;
}
@@ -596,7 +596,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
u16 v;
int i;
- dprintk("fw reset digital");
+ dprintk("fw reset digital\n");
HARD_RESET(state);
dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
@@ -645,7 +645,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
} while (--i);
if (i == 0) {
- dprintk("Pll: Unable to lock Pll");
+ dprintk("Pll: Unable to lock Pll\n");
return -EIO;
}
@@ -922,7 +922,7 @@ static void dib0090_wbd_target(struct dib0090_state *state, u32 rf)
#endif
state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset);
- dprintk("wbd-target: %d dB", (u32) state->wbd_target);
+ dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
}
static const int gain_reg_addr[4] = {
@@ -1019,7 +1019,7 @@ static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16
gain_reg[3] |= ((bb % 10) * 100) / 125;
#ifdef DEBUG_AGC
- dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x", rf, bb, rf + bb,
+ dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x\n", rf, bb, rf + bb,
gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]);
#endif
@@ -1050,7 +1050,7 @@ static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg)
dib0090_write_reg(state, 0x2a, 0xffff);
- dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
+ dprintk("total RF gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
dib0090_write_regs(state, 0x2c, cfg + 3, 6);
dib0090_write_regs(state, 0x3e, cfg + 9, 2);
@@ -1069,7 +1069,7 @@ static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher that 50dB */
dib0090_write_reg(state, 0x33, 0xffff);
- dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
+ dprintk("total BB gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x33));
dib0090_write_regs(state, 0x35, cfg + 3, 4);
}
@@ -1122,7 +1122,7 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
/* activate the ramp generator using PWM control */
if (state->rf_ramp)
- dprintk("ramp RF gain = %d BAND = %s version = %d",
+ dprintk("ramp RF gain = %d BAND = %s version = %d\n",
state->rf_ramp[0],
(state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
state->identity.version & 0x1f);
@@ -1130,10 +1130,10 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
if (rf_ramp && ((state->rf_ramp && state->rf_ramp[0] == 0) ||
(state->current_band == BAND_CBAND &&
(state->identity.version & 0x1f) <= P1D_E_F))) {
- dprintk("DE-Engage mux for direct gain reg control");
+ dprintk("DE-Engage mux for direct gain reg control\n");
en_pwm_rf_mux = 0;
} else
- dprintk("Engage mux for PWM control");
+ dprintk("Engage mux for PWM control\n");
dib0090_write_reg(state, 0x32, (en_pwm_rf_mux << 12) | (en_pwm_rf_mux << 11));
@@ -1352,7 +1352,7 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
while (f_MHz > wbd->max_freq)
wbd++;
- dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);
+ dprintk("using wbd-table-entry with max freq %d\n", wbd->max_freq);
if (current_temp < 0)
current_temp = 0;
@@ -1373,8 +1373,8 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;
state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
- dprintk("wbd-target: %d dB", (u32) state->wbd_target);
- dprintk("wbd offset applied is %d", wbd_tcold);
+ dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
+ dprintk("wbd offset applied is %d\n", wbd_tcold);
return state->wbd_offset + wbd_tcold;
}
@@ -1415,7 +1415,7 @@ int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
if ((!state->identity.p1g) || (!state->identity.in_soc)
|| ((state->identity.version != SOC_7090_P1G_21R1)
&& (state->identity.version != SOC_7090_P1G_11R1))) {
- dprintk("%s() function can only be used for dib7090P", __func__);
+ dprintk("%s() function can only be used for dib7090P\n", __func__);
return -ENODEV;
}
@@ -1598,7 +1598,7 @@ static int dib0090_reset(struct dvb_frontend *fe)
dib0090_write_reg(state, 0x14, 1);
else
dib0090_write_reg(state, 0x14, 2);
- dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
+ dprintk("Pll lock : %d\n", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
@@ -1711,7 +1711,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
/* fall through */
case CT_TUNER_STEP_0:
- dprintk("Start/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
+ dprintk("Start/continue DC calibration for %s path\n",
+ (state->dc->i == 1) ? "I" : "Q");
dib0090_write_reg(state, 0x01, state->dc->bb1);
dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
@@ -1733,13 +1734,13 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
break;
case CT_TUNER_STEP_5: /* found an offset */
- dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
+ dprintk("adc_diff = %d, current step= %d\n", (u32) state->adc_diff, state->step);
if (state->step == 0 && state->adc_diff < 0) {
state->min_adc_diff = -1023;
- dprintk("Change of sign of the minimum adc diff");
+ dprintk("Change of sign of the minimum adc diff\n");
}
- dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);
+ dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d\n", state->adc_diff, state->min_adc_diff, state->step);
/* first turn for this frequency */
if (state->step == 0) {
@@ -1758,12 +1759,12 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
} else {
/* the minimum was what we have seen in the step before */
if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
- dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
+ dprintk("Since adc_diff N = %d > adc_diff step N-1 = %d, Come back one step\n", state->adc_diff, state->min_adc_diff);
state->step--;
}
dib0090_set_trim(state);
- dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);
+ dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd\n", state->dc->addr, state->adc_diff, state->step);
state->dc++;
if (state->dc->addr == 0) /* done */
@@ -1819,7 +1820,7 @@ static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tu
case CT_TUNER_STEP_0:
state->wbd_offset = dib0090_get_slow_adc_val(state);
- dprintk("WBD calibration offset = %d", state->wbd_offset);
+ dprintk("WBD calibration offset = %d\n", state->wbd_offset);
*tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
state->calibrate &= ~WBD_CAL;
break;
@@ -2064,7 +2065,7 @@ int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
if ((!state->identity.p1g) || (!state->identity.in_soc)
|| ((state->identity.version != SOC_7090_P1G_21R1)
&& (state->identity.version != SOC_7090_P1G_11R1))) {
- dprintk("%s() function can only be used for dib7090", __func__);
+ dprintk("%s() function can only be used for dib7090\n", __func__);
return -ENODEV;
}
@@ -2098,7 +2099,8 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
force_soft_search = 1;
if (*tune_state == CT_TUNER_START) {
- dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
+ dprintk("Start Captrim search : %s\n",
+ (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
dib0090_write_reg(state, 0x10, 0x2B1);
dib0090_write_reg(state, 0x1e, 0x0032);
@@ -2140,13 +2142,13 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
dib0090_read_reg(state, 0x40);
state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
- dprintk("***Final Captrim= 0x%x", state->fcaptrim);
+ dprintk("***Final Captrim= 0x%x\n", state->fcaptrim);
*tune_state = CT_TUNER_STEP_3;
} else {
/* MERGE for all krosus before P1G */
adc = dib0090_get_slow_adc_val(state);
- dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
+ dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV\n", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
if (state->rest == 0 || state->identity.in_soc) { /* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! */
adc_target = 200;
@@ -2162,7 +2164,7 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
}
if (adc < state->adc_diff) {
- dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
+ dprintk("CAPTRIM=%d is closer to target (%d/%d)\n", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
state->adc_diff = adc;
state->fcaptrim = state->captrim;
}
@@ -2216,7 +2218,7 @@ static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tu
val = dib0090_get_slow_adc_val(state);
state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55;
- dprintk("temperature: %d C", state->temperature - 30);
+ dprintk("temperature: %d C\n", state->temperature - 30);
*tune_state = CT_TUNER_STEP_2;
break;
@@ -2478,13 +2480,13 @@ static int dib0090_tune(struct dvb_frontend *fe)
wbd++;
dib0090_write_reg(state, 0x1e, 0x07ff);
- dprintk("Final Captrim: %d", (u32) state->fcaptrim);
- dprintk("HFDIV code: %d", (u32) pll->hfdiv_code);
- dprintk("VCO = %d", (u32) pll->vco_band);
- dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
- dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz);
- dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
- dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
+ dprintk("Final Captrim: %d\n", (u32) state->fcaptrim);
+ dprintk("HFDIV code: %d\n", (u32) pll->hfdiv_code);
+ dprintk("VCO = %d\n", (u32) pll->vco_band);
+ dprintk("VCOF in kHz: %d ((%d*%d) << 1))\n", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
+ dprintk("REFDIV: %d, FREF: %d\n", (u32) 1, (u32) state->config->io.clock_khz);
+ dprintk("FBDIV: %d, Rest: %d\n", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
+ dprintk("Num: %d, Den: %d, SD: %d\n", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
(u32) dib0090_read_reg(state, 0x1c) & 0x3);
#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
@@ -2498,7 +2500,7 @@ static int dib0090_tune(struct dvb_frontend *fe)
dib0090_write_reg(state, 0x10, state->wbdmux);
if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) {
- dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune);
+ dprintk("P1G : The cable band is selected and lna_tune = %d\n", tune->lna_tune);
dib0090_write_reg(state, 0x09, tune->lna_bias);
dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim));
} else
@@ -2524,11 +2526,10 @@ static int dib0090_tune(struct dvb_frontend *fe)
return ret;
}
-static int dib0090_release(struct dvb_frontend *fe)
+static void dib0090_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
@@ -2643,7 +2644,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
if (dib0090_reset(fe) != 0)
goto free_mem;
- printk(KERN_INFO "DiB0090: successfully identified\n");
+ pr_info("DiB0090: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops));
return fe;
@@ -2670,7 +2671,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
if (dib0090_fw_reset_digital(fe, st->config) != 0)
goto free_mem;
- dprintk("DiB0090 FW: successfully identified");
+ dprintk("DiB0090 FW: successfully identified\n");
memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops));
return fe;
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 6821ecb53d63..068bec104e29 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -42,13 +44,13 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-able)).");
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_i2c(args...) dprintk(0x02,args)
-#define deb_srch(args...) dprintk(0x04,args)
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_xfer(args...) dprintk(0x02,args)
-#define deb_setf(args...) dprintk(0x04,args)
-#define deb_getf(args...) dprintk(0x08,args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_i2c(args...) dprintk(0x02, args)
+#define deb_srch(args...) dprintk(0x04, args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_xfer(args...) dprintk(0x02, args)
+#define deb_setf(args...) dprintk(0x04, args)
+#define deb_getf(args...) dprintk(0x08, args)
static int dib3000_read_reg(struct dib3000_state *state, u16 reg)
{
@@ -126,103 +128,96 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
- deb_setf("bandwidth: ");
switch (c->bandwidth_hz) {
case 8000000:
- deb_setf("8 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz);
break;
case 7000000:
- deb_setf("7 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz);
break;
case 6000000:
- deb_setf("6 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz);
break;
case 0:
return -EOPNOTSUPP;
default:
- err("unknown bandwidth value.");
+ pr_err("unknown bandwidth value.\n");
return -EINVAL;
}
+ deb_setf("bandwidth: %d MHZ\n", c->bandwidth_hz / 1000000);
}
wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
- deb_setf("transmission mode: ");
switch (c->transmission_mode) {
case TRANSMISSION_MODE_2K:
- deb_setf("2k\n");
+ deb_setf("transmission mode: 2k\n");
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
break;
case TRANSMISSION_MODE_8K:
- deb_setf("8k\n");
+ deb_setf("transmission mode: 8k\n");
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
break;
case TRANSMISSION_MODE_AUTO:
- deb_setf("auto\n");
+ deb_setf("transmission mode: auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("guard: ");
switch (c->guard_interval) {
case GUARD_INTERVAL_1_32:
- deb_setf("1_32\n");
+ deb_setf("guard 1_32\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
break;
case GUARD_INTERVAL_1_16:
- deb_setf("1_16\n");
+ deb_setf("guard 1_16\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
break;
case GUARD_INTERVAL_1_8:
- deb_setf("1_8\n");
+ deb_setf("guard 1_8\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
break;
case GUARD_INTERVAL_1_4:
- deb_setf("1_4\n");
+ deb_setf("guard 1_4\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
break;
case GUARD_INTERVAL_AUTO:
- deb_setf("auto\n");
+ deb_setf("guard auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("inversion: ");
switch (c->inversion) {
case INVERSION_OFF:
- deb_setf("off\n");
+ deb_setf("inversion off\n");
wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
break;
case INVERSION_AUTO:
- deb_setf("auto ");
+ deb_setf("inversion auto\n");
break;
case INVERSION_ON:
- deb_setf("on\n");
+ deb_setf("inversion on\n");
wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
break;
default:
return -EINVAL;
}
- deb_setf("modulation: ");
switch (c->modulation) {
case QPSK:
- deb_setf("qpsk\n");
+ deb_setf("modulation: qpsk\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK);
break;
case QAM_16:
- deb_setf("qam16\n");
+ deb_setf("modulation: qam16\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_16QAM);
break;
case QAM_64:
- deb_setf("qam64\n");
+ deb_setf("modulation: qam64\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_64QAM);
break;
case QAM_AUTO:
@@ -230,69 +225,64 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
default:
return -EINVAL;
}
- deb_setf("hierarchy: ");
switch (c->hierarchy) {
case HIERARCHY_NONE:
- deb_setf("none ");
+ deb_setf("hierarchy: none\n");
/* fall through */
case HIERARCHY_1:
- deb_setf("alpha=1\n");
+ deb_setf("hierarchy: alpha=1\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1);
break;
case HIERARCHY_2:
- deb_setf("alpha=2\n");
+ deb_setf("hierarchy: alpha=2\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_2);
break;
case HIERARCHY_4:
- deb_setf("alpha=4\n");
+ deb_setf("hierarchy: alpha=4\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_4);
break;
case HIERARCHY_AUTO:
- deb_setf("alpha=auto\n");
+ deb_setf("hierarchy: alpha=auto\n");
break;
default:
return -EINVAL;
}
- deb_setf("hierarchy: ");
if (c->hierarchy == HIERARCHY_NONE) {
- deb_setf("none\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP);
fe_cr = c->code_rate_HP;
} else if (c->hierarchy != HIERARCHY_AUTO) {
- deb_setf("on\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP);
fe_cr = c->code_rate_LP;
}
- deb_setf("fec: ");
switch (fe_cr) {
case FEC_1_2:
- deb_setf("1_2\n");
+ deb_setf("fec: 1_2\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_1_2);
break;
case FEC_2_3:
- deb_setf("2_3\n");
+ deb_setf("fec: 2_3\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_2_3);
break;
case FEC_3_4:
- deb_setf("3_4\n");
+ deb_setf("fec: 3_4\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_3_4);
break;
case FEC_5_6:
- deb_setf("5_6\n");
+ deb_setf("fec: 5_6\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_5_6);
break;
case FEC_7_8:
- deb_setf("7_8\n");
+ deb_setf("fec: 7_8\n");
wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_7_8);
break;
case FEC_NONE:
- deb_setf("none ");
+ deb_setf("fec: none\n");
break;
case FEC_AUTO:
- deb_setf("auto\n");
+ deb_setf("fec: auto\n");
break;
default:
return -EINVAL;
@@ -357,7 +347,8 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
rd(DIB3000MB_REG_LOCK2_VALUE))) < 0 && as_count++ < 100)
msleep(1);
- deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count);
+ deb_setf("search_state after autosearch %d after %d checks\n",
+ search_state, as_count);
if (search_state == 1) {
if (dib3000mb_get_frontend(fe, c) == 0) {
@@ -464,7 +455,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
return 0;
dds_val = ((rd(DIB3000MB_REG_DDS_VALUE_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_VALUE_LSB);
- deb_getf("DDS_VAL: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
+ deb_getf("DDS_VAL: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
if (dds_val < threshold)
inv_test1 = 0;
else if (dds_val == threshold)
@@ -473,7 +464,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
inv_test1 = 2;
dds_val = ((rd(DIB3000MB_REG_DDS_FREQ_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_FREQ_LSB);
- deb_getf("DDS_FREQ: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
+ deb_getf("DDS_FREQ: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
if (dds_val < threshold)
inv_test2 = 0;
else if (dds_val == threshold)
@@ -490,19 +481,19 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) {
case DIB3000_CONSTELLATION_QPSK:
- deb_getf("QPSK ");
+ deb_getf("QPSK\n");
c->modulation = QPSK;
break;
case DIB3000_CONSTELLATION_16QAM:
- deb_getf("QAM16 ");
+ deb_getf("QAM16\n");
c->modulation = QAM_16;
break;
case DIB3000_CONSTELLATION_64QAM:
- deb_getf("QAM64 ");
+ deb_getf("QAM64\n");
c->modulation = QAM_64;
break;
default:
- err("Unexpected constellation returned by TPS (%d)", tps_val);
+ pr_err("Unexpected constellation returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -513,23 +504,23 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
c->code_rate_HP = FEC_NONE;
switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) {
case DIB3000_ALPHA_0:
- deb_getf("HIERARCHY_NONE ");
+ deb_getf("HIERARCHY_NONE\n");
c->hierarchy = HIERARCHY_NONE;
break;
case DIB3000_ALPHA_1:
- deb_getf("HIERARCHY_1 ");
+ deb_getf("HIERARCHY_1\n");
c->hierarchy = HIERARCHY_1;
break;
case DIB3000_ALPHA_2:
- deb_getf("HIERARCHY_2 ");
+ deb_getf("HIERARCHY_2\n");
c->hierarchy = HIERARCHY_2;
break;
case DIB3000_ALPHA_4:
- deb_getf("HIERARCHY_4 ");
+ deb_getf("HIERARCHY_4\n");
c->hierarchy = HIERARCHY_4;
break;
default:
- err("Unexpected ALPHA value returned by TPS (%d)", tps_val);
+ pr_err("Unexpected ALPHA value returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -546,65 +537,65 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch (tps_val) {
case DIB3000_FEC_1_2:
- deb_getf("FEC_1_2 ");
+ deb_getf("FEC_1_2\n");
*cr = FEC_1_2;
break;
case DIB3000_FEC_2_3:
- deb_getf("FEC_2_3 ");
+ deb_getf("FEC_2_3\n");
*cr = FEC_2_3;
break;
case DIB3000_FEC_3_4:
- deb_getf("FEC_3_4 ");
+ deb_getf("FEC_3_4\n");
*cr = FEC_3_4;
break;
case DIB3000_FEC_5_6:
- deb_getf("FEC_5_6 ");
+ deb_getf("FEC_5_6\n");
*cr = FEC_4_5;
break;
case DIB3000_FEC_7_8:
- deb_getf("FEC_7_8 ");
+ deb_getf("FEC_7_8\n");
*cr = FEC_7_8;
break;
default:
- err("Unexpected FEC returned by TPS (%d)", tps_val);
+ pr_err("Unexpected FEC returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n",tps_val);
switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) {
case DIB3000_GUARD_TIME_1_32:
- deb_getf("GUARD_INTERVAL_1_32 ");
+ deb_getf("GUARD_INTERVAL_1_32\n");
c->guard_interval = GUARD_INTERVAL_1_32;
break;
case DIB3000_GUARD_TIME_1_16:
- deb_getf("GUARD_INTERVAL_1_16 ");
+ deb_getf("GUARD_INTERVAL_1_16\n");
c->guard_interval = GUARD_INTERVAL_1_16;
break;
case DIB3000_GUARD_TIME_1_8:
- deb_getf("GUARD_INTERVAL_1_8 ");
+ deb_getf("GUARD_INTERVAL_1_8\n");
c->guard_interval = GUARD_INTERVAL_1_8;
break;
case DIB3000_GUARD_TIME_1_4:
- deb_getf("GUARD_INTERVAL_1_4 ");
+ deb_getf("GUARD_INTERVAL_1_4\n");
c->guard_interval = GUARD_INTERVAL_1_4;
break;
default:
- err("Unexpected Guard Time returned by TPS (%d)", tps_val);
+ pr_err("Unexpected Guard Time returned by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) {
case DIB3000_TRANSMISSION_MODE_2K:
- deb_getf("TRANSMISSION_MODE_2K ");
+ deb_getf("TRANSMISSION_MODE_2K\n");
c->transmission_mode = TRANSMISSION_MODE_2K;
break;
case DIB3000_TRANSMISSION_MODE_8K:
- deb_getf("TRANSMISSION_MODE_8K ");
+ deb_getf("TRANSMISSION_MODE_8K\n");
c->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
- err("unexpected transmission mode return by TPS (%d)", tps_val);
+ pr_err("unexpected transmission mode return by TPS (%d)\n", tps_val);
break;
}
deb_getf("TPS: %d\n", tps_val);
@@ -751,7 +742,7 @@ static int dib3000mb_tuner_pass_ctrl(struct dvb_frontend *fe, int onoff, u8 pll_
return 0;
}
-static struct dvb_frontend_ops dib3000mb_ops;
+static const struct dvb_frontend_ops dib3000mb_ops;
struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
@@ -791,7 +782,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops dib3000mb_ops = {
+static const struct dvb_frontend_ops dib3000mb_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000M-B DVB-T",
diff --git a/drivers/media/dvb-frontends/dib3000mb_priv.h b/drivers/media/dvb-frontends/dib3000mb_priv.h
index 0459d5c84314..ef7f5d136c6b 100644
--- a/drivers/media/dvb-frontends/dib3000mb_priv.h
+++ b/drivers/media/dvb-frontends/dib3000mb_priv.h
@@ -13,20 +13,15 @@
#ifndef __DIB3000MB_PRIV_H_INCLUDED__
#define __DIB3000MB_PRIV_H_INCLUDED__
-/* info and err, taken from usb.h, if there is anything available like by default. */
-#define err(format, arg...) printk(KERN_ERR "dib3000: " format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO "dib3000: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "dib3000: " format "\n" , ## arg)
-
/* handy shortcuts */
#define rd(reg) dib3000_read_reg(state,reg)
#define wr(reg,val) if (dib3000_write_reg(state,reg,val)) \
- { err("while sending 0x%04x to 0x%04x.",val,reg); return -EREMOTEIO; }
+ { pr_err("while sending 0x%04x to 0x%04x.", val, reg); return -EREMOTEIO; }
#define wr_foreach(a,v) { int i; \
if (sizeof(a) != sizeof(v)) \
- err("sizeof: %zu %zu is different",sizeof(a),sizeof(v));\
+ pr_err("sizeof: %zu %zu is different", sizeof(a), sizeof(v));\
for (i=0; i < sizeof(a)/sizeof(u16); i++) \
wr(a[i],v[i]); \
}
@@ -37,8 +32,11 @@
/* debug */
-#define dprintk(level,args...) \
- do { if ((debug & level)) { printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug & level) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
/* mask for enabling a specific pid for the pid_filter */
#define DIB3000_ACTIVATE_PID_FILTERING (0x2000)
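dib3000mb keeps a bitmask debug level, and the reworked dprintk above only emits a message when its level bit is set in the module's debug mask. A small self-contained sketch of that masking idea, assuming GNU-C variadic macros and using printf with an invented "dib3000_demo" prefix:

    #include <stdio.h>

    static int debug = 0x04 | 0x08;         /* enable the "setf" and "getf" bits */

    #define dprintk(level, fmt, arg...) do {                    \
            if (debug & (level))                                \
                    printf("dib3000_demo: %s: " fmt,            \
                           __func__, ##arg);                    \
    } while (0)

    #define deb_setf(fmt, arg...)   dprintk(0x04, fmt, ##arg)
    #define deb_getf(fmt, arg...)   dprintk(0x08, fmt, ##arg)
    #define deb_info(fmt, arg...)   dprintk(0x01, fmt, ##arg)

    int main(void)
    {
            deb_setf("bandwidth: %d MHz\n", 8);     /* printed: bit 0x04 is set */
            deb_info("probe done\n");               /* suppressed: bit 0x01 is clear */
            return 0;
    }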
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index da0f1dc5aaf7..224283fe100a 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation, version 2.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -27,7 +29,11 @@ static int buggy_sfn_workaround;
module_param(buggy_sfn_workaround, int, 0644);
MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB3000MC/P:"); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct dib3000mc_state {
struct dvb_frontend demod;
@@ -873,7 +879,7 @@ int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defa
}
EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
-static struct dvb_frontend_ops dib3000mc_ops;
+static const struct dvb_frontend_ops dib3000mc_ops;
struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib3000mc_config *cfg)
{
@@ -906,7 +912,7 @@ error:
}
EXPORT_SYMBOL(dib3000mc_attach);
-static struct dvb_frontend_ops dib3000mc_ops = {
+static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000MC/P",
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index b3ddae8885ac..5ce9f93a65c3 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -8,6 +8,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -21,7 +24,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct dib7000m_state {
struct dvb_frontend demod;
@@ -74,7 +81,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -92,7 +99,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d",reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
@@ -105,7 +112,7 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -154,7 +161,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
fifo_threshold = 1792;
smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);
- dprintk( "setting output mode for demod %p to %d", &state->demod, mode);
+ dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
@@ -181,7 +188,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
outreg = 0;
break;
default:
- dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
break;
}
@@ -302,7 +309,7 @@ static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc
break;
}
-// dprintk( "913: %x, 914: %x", reg_913, reg_914);
+// dprintk("913: %x, 914: %x\n", reg_913, reg_914);
ret |= dib7000m_write_word(state, 913, reg_913);
ret |= dib7000m_write_word(state, 914, reg_914);
@@ -320,10 +327,10 @@ static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
state->current_bandwidth = bw;
if (state->timf == 0) {
- dprintk( "using default timf");
+ dprintk("using default timf\n");
timf = state->timf_default;
} else {
- dprintk( "using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -340,7 +347,7 @@ static int dib7000m_set_diversity_in(struct dvb_frontend *demod, int onoff)
struct dib7000m_state *state = demod->demodulator_priv;
if (state->div_force_off) {
- dprintk( "diversity combination deactivated - forced by COFDM parameters");
+ dprintk("diversity combination deactivated - forced by COFDM parameters\n");
onoff = 0;
}
state->div_state = (u8)onoff;
@@ -580,10 +587,10 @@ static int dib7000m_demod_reset(struct dib7000m_state *state)
dib7000mc_reset_pll(state);
if (dib7000m_reset_gpio(state) != 0)
- dprintk( "GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
- dprintk( "OUTPUT_MODE could not be reset.");
+ dprintk("OUTPUT_MODE could not be reset.\n");
/* unforce divstr regardless whether i2c enumeration was done or not */
dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );
@@ -650,7 +657,7 @@ static int dib7000m_agc_soft_split(struct dib7000m_state *state)
(agc - state->current_agc->split.min_thres) /
(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
- dprintk( "AGC split_offset: %d",split_offset);
+ dprintk("AGC split_offset: %d\n", split_offset);
// P_agc_force_split and P_agc_split_offset
return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
@@ -687,7 +694,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
}
if (agc == NULL) {
- dprintk( "no valid AGC configuration found for band 0x%02x",band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -703,7 +710,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
dib7000m_write_word(state, 99, (agc->beta_mant << 6) | agc->beta_exp);
- dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
/* AGC continued */
@@ -724,7 +731,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
if (state->revision > 0x4000) { // settings for the MC
dib7000m_write_word(state, 71, agc->agc1_pt3);
-// dprintk( "929: %x %d %d",
+// dprintk("929: %x %d %d\n",
// (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
} else {
@@ -742,7 +749,7 @@ static void dib7000m_update_timf(struct dib7000m_state *state)
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000m_write_word(state, 23, (u16) (timf >> 16));
dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
- dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
+ dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->timf_default);
}
static int dib7000m_agc_startup(struct dvb_frontend *demod)
@@ -804,7 +811,7 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod)
dib7000m_restart_agc(state);
- dprintk( "SPLIT %p: %hd", demod, agc_split);
+ dprintk("SPLIT %p: %hd\n", demod, agc_split);
(*agc_state)++;
ret = 5;
@@ -1013,12 +1020,12 @@ static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg)
u16 irq_pending = dib7000m_read_word(state, reg);
if (irq_pending & 0x1) { // failed
- dprintk( "autosearch failed");
+ dprintk("autosearch failed\n");
return 1;
}
if (irq_pending & 0x2) { // succeeded
- dprintk( "autosearch succeeded");
+ dprintk("autosearch succeeded\n");
return 2;
}
return 0; // still pending
@@ -1102,7 +1109,7 @@ static int dib7000m_wakeup(struct dvb_frontend *demod)
dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
- dprintk( "could not start Slow ADC");
+ dprintk("could not start Slow ADC\n");
return 0;
}
@@ -1121,7 +1128,7 @@ static int dib7000m_identify(struct dib7000m_state *state)
u16 value;
if ((value = dib7000m_read_word(state, 896)) != 0x01b3) {
- dprintk( "wrong Vendor ID (0x%x)",value);
+ dprintk("wrong Vendor ID (0x%x)\n", value);
return -EREMOTEIO;
}
@@ -1130,21 +1137,21 @@ static int dib7000m_identify(struct dib7000m_state *state)
state->revision != 0x4001 &&
state->revision != 0x4002 &&
state->revision != 0x4003) {
- dprintk( "wrong Device ID (0x%x)",value);
+ dprintk("wrong Device ID (0x%x)\n", value);
return -EREMOTEIO;
}
/* protect this driver to be used with 7000PC */
if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) {
- dprintk( "this driver does not work with DiB7000PC");
+ dprintk("this driver does not work with DiB7000PC\n");
return -EREMOTEIO;
}
switch (state->revision) {
- case 0x4000: dprintk( "found DiB7000MA/PA/MB/PB"); break;
- case 0x4001: state->reg_offs = 1; dprintk( "found DiB7000HC"); break;
- case 0x4002: state->reg_offs = 1; dprintk( "found DiB7000MC"); break;
- case 0x4003: state->reg_offs = 1; dprintk( "found DiB9000"); break;
+ case 0x4000: dprintk("found DiB7000MA/PA/MB/PB\n"); break;
+ case 0x4001: state->reg_offs = 1; dprintk("found DiB7000HC\n"); break;
+ case 0x4002: state->reg_offs = 1; dprintk("found DiB7000MC\n"); break;
+ case 0x4003: state->reg_offs = 1; dprintk("found DiB9000\n"); break;
}
return 0;
@@ -1242,7 +1249,7 @@ static int dib7000m_set_frontend(struct dvb_frontend *fe)
found = dib7000m_autosearch_is_irq(fe);
} while (found == 0 && i--);
- dprintk("autosearch returns: %d",found);
+ dprintk("autosearch returns: %d\n", found);
if (found == 0 || found == 1)
return 0; // no channel found
@@ -1330,7 +1337,7 @@ int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
struct dib7000m_state *state = fe->demodulator_priv;
u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
return dib7000m_write_word(state, 294 + state->reg_offs, val);
}
EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
@@ -1338,7 +1345,7 @@ EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000m_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib7000m_write_word(state, 300 + state->reg_offs + id,
onoff ? (1 << 13) | pid : 0);
}
@@ -1362,7 +1369,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
if (dib7000m_identify(&st) != 0) {
st.i2c_addr = default_addr;
if (dib7000m_identify(&st) != 0) {
- dprintk("DiB7000M #%d: not identified", k);
+ dprintk("DiB7000M #%d: not identified\n", k);
return -EIO;
}
}
@@ -1375,7 +1382,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
/* set new i2c address and force divstart */
dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -1394,7 +1401,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
EXPORT_SYMBOL(dib7000m_i2c_enumeration);
#endif
-static struct dvb_frontend_ops dib7000m_ops;
+static const struct dvb_frontend_ops dib7000m_ops;
struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg)
{
struct dvb_frontend *demod;
@@ -1432,7 +1439,7 @@ error:
}
EXPORT_SYMBOL(dib7000m_attach);
-static struct dvb_frontend_ops dib7000m_ops = {
+static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index b861d4437f2a..a27c0001f2d6 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -26,7 +29,11 @@ static int buggy_sfn_workaround;
module_param(buggy_sfn_workaround, int, 0644);
MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct i2c_device {
struct i2c_adapter *i2c_adap;
@@ -98,7 +105,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -116,7 +123,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
@@ -128,7 +135,7 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -174,7 +181,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
fifo_threshold = 1792;
smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
- dprintk("setting output mode for demod %p to %d", &state->demod, mode);
+ dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -204,7 +211,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
outreg = 0;
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
break;
}
@@ -224,7 +231,7 @@ static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
struct dib7000p_state *state = demod->demodulator_priv;
if (state->div_force_off) {
- dprintk("diversity combination deactivated - forced by COFDM parameters");
+ dprintk("diversity combination deactivated - forced by COFDM parameters\n");
onoff = 0;
dib7000p_write_word(state, 207, 0);
} else
@@ -374,10 +381,10 @@ static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
state->current_bandwidth = bw;
if (state->timf == 0) {
- dprintk("using default timf");
+ dprintk("using default timf\n");
timf = state->cfg.bw->timf;
} else {
- dprintk("using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -494,7 +501,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
loopdiv = (reg_1856 >> 6) & 0x3f;
if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
- dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
reg_1856 &= 0xf000;
reg_1857 = dib7000p_read_word(state, 1857);
dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15));
@@ -511,7 +518,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
dib7000p_write_word(state, 1857, reg_1857 | (1 << 15));
while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
- dprintk("Waiting for PLL to lock");
+ dprintk("Waiting for PLL to lock\n");
return 0;
}
@@ -521,7 +528,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
static int dib7000p_reset_gpio(struct dib7000p_state *st)
{
/* reset the GPIOs */
- dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
+ dprintk("gpio dir: %x: val: %x, pwm_pos: %x\n", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
dib7000p_write_word(st, 1029, st->gpio_dir);
dib7000p_write_word(st, 1030, st->gpio_val);
@@ -669,7 +676,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_reset_pll(state);
if (dib7000p_reset_gpio(state) != 0)
- dprintk("GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if (state->version == SOC7090) {
dib7000p_write_word(state, 899, 0);
@@ -681,7 +688,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_write_word(state, 273, (0<<6) | 30);
}
if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
- dprintk("OUTPUT_MODE could not be reset.");
+ dprintk("OUTPUT_MODE could not be reset.\n");
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib7000p_sad_calib(state);
@@ -759,7 +766,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
}
if (agc == NULL) {
- dprintk("no valid AGC configuration found for band 0x%02x", band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -776,7 +783,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
/* AGC continued */
- dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
if (state->wbd_ref != 0)
@@ -806,7 +813,7 @@ static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
- dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert);
+ dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d\n", offset_khz, internal, invert);
if (offset_khz < 0)
unit_khz_dds_val *= -1;
@@ -902,7 +909,7 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
dib7000p_restart_agc(state);
- dprintk("SPLIT %p: %hd", demod, agc_split);
+ dprintk("SPLIT %p: %hd\n", demod, agc_split);
(*agc_state)++;
ret = 5;
@@ -934,7 +941,7 @@ static void dib7000p_update_timf(struct dib7000p_state *state)
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000p_write_word(state, 23, (u16) (timf >> 16));
dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
- dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf);
+ dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->cfg.bw->timf);
}
@@ -1202,7 +1209,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
int bw_khz = bw;
u32 pha;
- dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
+ dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)\n", f_rel, rf_khz, xtal);
if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2)
return;
@@ -1252,7 +1259,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
coef_im[k] = (1 << 24) - 1;
coef_im[k] /= (1 << 15);
- dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
+ dprintk("PALF COEF: %d re: %d im: %d\n", k, coef_re[k], coef_im[k]);
dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
dib7000p_write_word(state, 144, coef_im[k] & 0x3ff);
@@ -1280,7 +1287,7 @@ static int dib7000p_tune(struct dvb_frontend *demod)
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
tmp = (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3);
if (state->sfn_workaround_active) {
- dprintk("SFN workaround is active");
+ dprintk("SFN workaround is active\n");
tmp |= (1 << 9);
dib7000p_write_word(state, 166, 0x4000);
} else {
@@ -1390,15 +1397,15 @@ static int dib7000p_sleep(struct dvb_frontend *demod)
static int dib7000p_identify(struct dib7000p_state *st)
{
u16 value;
- dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr);
+ dprintk("checking demod on I2C address: %d (%x)\n", st->i2c_addr, st->i2c_addr);
if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
- dprintk("wrong Vendor ID (read=0x%x)", value);
+ dprintk("wrong Vendor ID (read=0x%x)\n", value);
return -EREMOTEIO;
}
if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
- dprintk("wrong Device ID (%x)", value);
+ dprintk("wrong Device ID (%x)\n", value);
return -EREMOTEIO;
}
@@ -1536,7 +1543,7 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe)
found = dib7000p_autosearch_is_irq(fe);
} while (found == 0 && i--);
- dprintk("autosearch returns: %d", found);
+ dprintk("autosearch returns: %d\n", found);
if (found == 0 || found == 1)
return 0;
@@ -1951,7 +1958,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, enum fe_status stat)
time_us = dib7000p_get_time_us(demod);
state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
- dprintk("Next all layers stats available in %u us.", time_us);
+ dprintk("Next all layers stats available in %u us.\n", time_us);
dib7000p_read_ber(demod, &val);
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -2019,7 +2026,7 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
- dprintk("-D- DiB7000PC detected");
+ dprintk("-D- DiB7000PC detected\n");
return 1;
}
@@ -2027,11 +2034,11 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
- dprintk("-D- DiB7000PC detected");
+ dprintk("-D- DiB7000PC detected\n");
return 1;
}
- dprintk("-D- DiB7000PC not detected");
+ dprintk("-D- DiB7000PC not detected\n");
kfree(rx);
rx_memory_error:
@@ -2050,14 +2057,14 @@ static int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
struct dib7000p_state *state = fe->demodulator_priv;
u16 val = dib7000p_read_word(state, 235) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
return dib7000p_write_word(state, 235, val);
}
static int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
}
@@ -2100,7 +2107,7 @@ static int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u
/* set new i2c address and force divstart */
dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -2136,21 +2143,21 @@ static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
buf[0] = dib7000p_read_word(state, 0x184);
buf[1] = dib7000p_read_word(state, 0x185);
pow_i = (buf[0] << 16) | buf[1];
- dprintk("raw pow_i = %d", pow_i);
+ dprintk("raw pow_i = %d\n", pow_i);
tmp_val = pow_i;
while (tmp_val >>= 1)
exp++;
mant = (pow_i * 1000 / (1 << exp));
- dprintk(" mant = %d exp = %d", mant / 1000, exp);
+ dprintk(" mant = %d exp = %d\n", mant / 1000, exp);
ix = (u8) ((mant - 1000) / 100); /* index of the LUT */
- dprintk(" ix = %d", ix);
+ dprintk(" ix = %d\n", ix);
pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908);
pow_i = (pow_i << 8) / 1000;
- dprintk(" pow_i = %d", pow_i);
+ dprintk(" pow_i = %d\n", pow_i);
return pow_i;
}
@@ -2185,7 +2192,7 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("Tuner ITF: write busy (overflow)");
+ dprintk("Tuner ITF: write busy (overflow)\n");
}
dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -2205,7 +2212,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (overflow)");
+ dprintk("TunerITF: read busy (overflow)\n");
}
dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f));
@@ -2214,7 +2221,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
n_empty = dib7000p_read_word(state, 1984) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (empty)");
+ dprintk("TunerITF: read busy (empty)\n");
}
read_word = dib7000p_read_word(state, 1987);
msg[1].buf[0] = (read_word >> 8) & 0xff;
@@ -2435,7 +2442,7 @@ static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32
static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
{
- dprintk("Configure DibStream Tx");
+ dprintk("Configure DibStream Tx\n");
dib7000p_write_word(state, 1615, 1);
dib7000p_write_word(state, 1603, P_Kin);
@@ -2455,7 +2462,7 @@ static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout
{
u32 syncFreq;
- dprintk("Configure DibStream Rx");
+ dprintk("Configure DibStream Rx\n");
if ((P_Kin != 0) && (P_Kout != 0)) {
syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize);
dib7000p_write_word(state, 1542, syncFreq);
@@ -2492,7 +2499,7 @@ static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff)
static void dib7090_configMpegMux(struct dib7000p_state *state,
u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
{
- dprintk("Enable Mpeg mux");
+ dprintk("Enable Mpeg mux\n");
dib7090_enMpegMux(state, 0);
@@ -2513,17 +2520,17 @@ static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode)
switch (mode) {
case MPEG_ON_DIBTX:
- dprintk("SET MPEG ON DIBSTREAM TX");
+ dprintk("SET MPEG ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
reg_1288 |= (1<<9);
break;
case DIV_ON_DIBTX:
- dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
reg_1288 |= (1<<8);
break;
case ADC_ON_DIBTX:
- dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
reg_1288 |= (1<<7);
break;
@@ -2539,17 +2546,17 @@ static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
switch (mode) {
case DEMOUT_ON_HOSTBUS:
- dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<6);
break;
case DIBTX_ON_HOSTBUS:
- dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dprintk("SET DIBSTREAM TX ON HOST BUS\n");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<5);
break;
case MPEG_ON_HOSTBUS:
- dprintk("SET MPEG MUX ON HOST BUS");
+ dprintk("SET MPEG MUX ON HOST BUS\n");
reg_1288 |= (1<<4);
break;
default:
@@ -2565,7 +2572,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
switch (onoff) {
case 0: /* only use the internal way - not the diversity input */
- dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__);
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT\n", __func__);
dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
/* Do not divide the serial clock of MPEG MUX */
@@ -2581,7 +2588,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
break;
case 1: /* both ways */
case 2: /* only the diversity input */
- dprintk("%s ON : Enable diversity INPUT", __func__);
+ dprintk("%s ON : Enable diversity INPUT\n", __func__);
dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
state->input_mode_mpeg = 0;
break;
@@ -2612,11 +2619,11 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
- dprintk("setting output mode TS_SERIAL using Mpeg Mux");
+ dprintk("setting output mode TS_SERIAL using Mpeg Mux\n");
dib7090_configMpegMux(state, 3, 1, 1);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("setting output mode TS_SERIAL using Smooth bloc");
+ dprintk("setting output mode TS_SERIAL using Smooth bloc\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (2<<6) | (0 << 1);
}
@@ -2624,24 +2631,24 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
- dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
dib7090_configMpegMux(state, 2, 0, 0);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else { /* Use Smooth block */
- dprintk("setting output mode TS_PARALLEL_GATED using Smooth block");
+ dprintk("setting output mode TS_PARALLEL_GATED using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (0<<6);
}
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
- dprintk("setting output mode TS_PARALLEL_CONT using Smooth block");
+ dprintk("setting output mode TS_PARALLEL_CONT using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (1<<6);
break;
case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */
- dprintk("setting output mode TS_FIFO using Smooth block");
+ dprintk("setting output mode TS_FIFO using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5<<6);
smo_mode |= (3 << 1);
@@ -2649,13 +2656,13 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_DIVERSITY:
- dprintk("setting output mode MODE_DIVERSITY");
+ dprintk("setting output mode MODE_DIVERSITY\n");
dib7090_setDibTxMux(state, DIV_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
- dprintk("setting output mode MODE_ANALOG_ADC");
+ dprintk("setting output mode MODE_ANALOG_ADC\n");
dib7090_setDibTxMux(state, ADC_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
@@ -2678,7 +2685,7 @@ static int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
struct dib7000p_state *state = fe->demodulator_priv;
u16 en_cur_state;
- dprintk("sleep dib7090: %d", onoff);
+ dprintk("sleep dib7090: %d\n", onoff);
en_cur_state = dib7000p_read_word(state, 1922);
@@ -2714,7 +2721,7 @@ static int dib7090_slave_reset(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops dib7000p_ops;
+static const struct dvb_frontend_ops dib7000p_ops;
static struct dvb_frontend *dib7000p_init(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
struct dvb_frontend *demod;
@@ -2804,7 +2811,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
}
EXPORT_SYMBOL(dib7000p_attach);
-static struct dvb_frontend_ops dib7000p_ops = {
+static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index ddf9c44877a2..e501ec964df1 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -31,7 +34,11 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct i2c_device {
struct i2c_adapter *adap;
@@ -147,7 +154,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
};
if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -157,7 +164,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
msg[1].buf = i2c->i2c_read_buffer;
if (i2c_transfer(i2c->adap, msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
mutex_unlock(i2c->i2c_buffer_lock);
@@ -182,7 +189,7 @@ static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
state->msg[1].len = 2;
if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
@@ -194,7 +201,7 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -210,7 +217,7 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
u16 rw[2];
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -228,7 +235,7 @@ static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
int ret = 0;
if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -249,7 +256,7 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -395,7 +402,7 @@ static void dib8000_set_acquisition_mode(struct dib8000_state *state)
{
u16 nud = dib8000_read_word(state, 298);
nud |= (1 << 3) | (1 << 0);
- dprintk("acquisition mode activated");
+ dprintk("acquisition mode activated\n");
dib8000_write_word(state, 298, nud);
}
static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
@@ -408,7 +415,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
fifo_threshold = 1792;
smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1);
- dprintk("-I- Setting output mode for demod %p to %d",
+ dprintk("-I- Setting output mode for demod %p to %d\n",
&state->fe[0], mode);
switch (mode) {
@@ -443,7 +450,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p",
+ dprintk("Unhandled output_mode passed to be set for demod %p\n",
&state->fe[0]);
return -EINVAL;
}
@@ -464,7 +471,7 @@ static int dib8000_set_diversity_in(struct dvb_frontend *fe, int onoff)
struct dib8000_state *state = fe->demodulator_priv;
u16 tmp, sync_wait = dib8000_read_word(state, 273) & 0xfff0;
- dprintk("set diversity input to %i", onoff);
+ dprintk("set diversity input to %i\n", onoff);
if (!state->differential_constellation) {
dib8000_write_word(state, 272, 1 << 9); //dvsy_off_lmod4 = 1
dib8000_write_word(state, 273, sync_wait | (1 << 2) | 2); // sync_enable = 1; comb_mode = 2
@@ -531,7 +538,7 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
break;
}
- dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x", reg_774, reg_775, reg_776, reg_900, reg_1280);
+ dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x\n", reg_774, reg_775, reg_776, reg_900, reg_1280);
dib8000_write_word(state, 774, reg_774);
dib8000_write_word(state, 775, reg_775);
dib8000_write_word(state, 776, reg_776);
@@ -619,10 +626,10 @@ static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw)
bw = 6000;
if (state->timf == 0) {
- dprintk("using default timf");
+ dprintk("using default timf\n");
timf = state->timf_default;
} else {
- dprintk("using updated timf");
+ dprintk("using updated timf\n");
timf = state->timf;
}
@@ -667,7 +674,7 @@ static int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
static void dib8000_reset_pll_common(struct dib8000_state *state, const struct dibx000_bandwidth_config *bw)
{
- dprintk("ifreq: %d %x, inversion: %d", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
+ dprintk("ifreq: %d %x, inversion: %d\n", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
if (state->revision != 0x8090) {
dib8000_write_word(state, 23,
(u16) (((bw->internal * 1000) >> 16) & 0xffff));
@@ -704,7 +711,7 @@ static void dib8000_reset_pll(struct dib8000_state *state)
clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
dib8000_write_word(state, 902, clk_cfg1);
- dprintk("clk_cfg1: 0x%04x", clk_cfg1);
+ dprintk("clk_cfg1: 0x%04x\n", clk_cfg1);
/* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
if (state->cfg.pll->ADClkSrc == 0)
@@ -754,7 +761,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
pll->pll_ratio == loopdiv))
return -EINVAL;
- dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
if (state->revision == 0x8090) {
reg_1856 &= 0xf000;
reg_1857 = dib8000_read_word(state, 1857);
@@ -767,11 +774,11 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
/* write new system clk into P_sec_len */
internal = dib8000_read32(state, 23) / 1000;
- dprintk("Old Internal = %d", internal);
+ dprintk("Old Internal = %d\n", internal);
xtal = 2 * (internal / loopdiv) * prediv;
internal = 1000 * (xtal/pll->pll_prediv) * pll->pll_ratio;
- dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d", xtal, internal/1000, internal/2000, internal/8000);
- dprintk("New Internal = %d", internal);
+ dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d\n", xtal, internal/1000, internal/2000, internal/8000);
+ dprintk("New Internal = %d\n", internal);
dib8000_write_word(state, 23,
(u16) (((internal / 2) >> 16) & 0xffff));
@@ -780,22 +787,22 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
dib8000_write_word(state, 1857, reg_1857 | (1 << 15));
while (((dib8000_read_word(state, 1856)>>15)&0x1) != 1)
- dprintk("Waiting for PLL to lock");
+ dprintk("Waiting for PLL to lock\n");
/* verify */
reg_1856 = dib8000_read_word(state, 1856);
- dprintk("PLL Updated with prediv = %d and loopdiv = %d",
+ dprintk("PLL Updated with prediv = %d and loopdiv = %d\n",
reg_1856&0x3f, (reg_1856>>6)&0x3f);
} else {
if (bw != state->current_demod_bw) {
/** Bandwidth change => force PLL update **/
- dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
+ dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)\n", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
if (state->cfg.pll->pll_prediv != oldprediv) {
/** Full PLL change only if prediv is changed **/
/** full update => bypass and reconfigure **/
- dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
+ dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)\n", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
dib8000_write_word(state, 902, dib8000_read_word(state, 902) | (1<<3)); /* bypass PLL */
dib8000_reset_pll(state);
dib8000_write_word(state, 898, 0x0004); /* sad */
@@ -807,7 +814,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
if (ratio != 0) {
/** ratio update => only change ratio **/
- dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
+ dprintk("PLL: Update ratio (prediv: %d, ratio: %d)\n", state->cfg.pll->pll_prediv, ratio);
dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
}
}
@@ -841,7 +848,7 @@ static int dib8000_cfg_gpio(struct dib8000_state *st, u8 num, u8 dir, u8 val)
st->cfg.gpio_val |= (val & 0x01) << num; /* set the new value */
dib8000_write_word(st, 1030, st->cfg.gpio_val);
- dprintk("gpio dir: %x: gpio val: %x", st->cfg.gpio_dir, st->cfg.gpio_val);
+ dprintk("gpio dir: %x: gpio val: %x\n", st->cfg.gpio_dir, st->cfg.gpio_val);
return 0;
}
@@ -958,29 +965,29 @@ static u16 dib8000_identify(struct i2c_device *client)
value = dib8000_i2c_read16(client, 896);
if ((value = dib8000_i2c_read16(client, 896)) != 0x01b3) {
- dprintk("wrong Vendor ID (read=0x%x)", value);
+ dprintk("wrong Vendor ID (read=0x%x)\n", value);
return 0;
}
value = dib8000_i2c_read16(client, 897);
if (value != 0x8000 && value != 0x8001 &&
value != 0x8002 && value != 0x8090) {
- dprintk("wrong Device ID (%x)", value);
+ dprintk("wrong Device ID (%x)\n", value);
return 0;
}
switch (value) {
case 0x8000:
- dprintk("found DiB8000A");
+ dprintk("found DiB8000A\n");
break;
case 0x8001:
- dprintk("found DiB8000B");
+ dprintk("found DiB8000B\n");
break;
case 0x8002:
- dprintk("found DiB8000C");
+ dprintk("found DiB8000C\n");
break;
case 0x8090:
- dprintk("found DiB8096P");
+ dprintk("found DiB8096P\n");
break;
}
return value;
@@ -1037,7 +1044,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 1287, 0x0003);
if (state->revision == 0x8000)
- dprintk("error : dib8000 MA not supported");
+ dprintk("error : dib8000 MA not supported\n");
dibx000_reset_i2c_master(&state->i2c_master);
@@ -1069,7 +1076,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
if (state->cfg.drives)
dib8000_write_word(state, 906, state->cfg.drives);
else {
- dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.");
+ dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.\n");
/* min drive SDRAM - not optimal - adjust */
dib8000_write_word(state, 906, 0x2d98);
}
@@ -1080,11 +1087,11 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 898, 0x0004);
if (dib8000_reset_gpio(state) != 0)
- dprintk("GPIO reset was not successful.");
+ dprintk("GPIO reset was not successful.\n");
if ((state->revision != 0x8090) &&
(dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0))
- dprintk("OUTPUT_MODE could not be resetted.");
+ dprintk("OUTPUT_MODE could not be resetted.\n");
state->current_agc = NULL;
@@ -1176,7 +1183,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
}
if (agc == NULL) {
- dprintk("no valid AGC configuration found for band 0x%02x", band);
+ dprintk("no valid AGC configuration found for band 0x%02x\n", band);
return -EINVAL;
}
@@ -1192,7 +1199,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
dib8000_write_word(state, 102, (agc->alpha_mant << 5) | agc->alpha_exp);
dib8000_write_word(state, 103, (agc->beta_mant << 6) | agc->beta_exp);
- dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
/* AGC continued */
@@ -1251,7 +1258,7 @@ static int dib8000_agc_soft_split(struct dib8000_state *state)
(agc - state->current_agc->split.min_thres) /
(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
- dprintk("AGC split_offset: %d", split_offset);
+ dprintk("AGC split_offset: %d\n", split_offset);
// P_agc_force_split and P_agc_split_offset
dib8000_write_word(state, 107, (dib8000_read_word(state, 107) & 0xff00) | split_offset);
@@ -1395,7 +1402,7 @@ static void dib8096p_cfg_DibTx(struct dib8000_state *state, u32 P_Kin,
u32 P_Kout, u32 insertExtSynchro, u32 synchroMode,
u32 syncWord, u32 syncSize)
{
- dprintk("Configure DibStream Tx");
+ dprintk("Configure DibStream Tx\n");
dib8000_write_word(state, 1615, 1);
dib8000_write_word(state, 1603, P_Kin);
@@ -1414,7 +1421,7 @@ static void dib8096p_cfg_DibRx(struct dib8000_state *state, u32 P_Kin,
{
u32 syncFreq;
- dprintk("Configure DibStream Rx synchroMode = %d", synchroMode);
+ dprintk("Configure DibStream Rx synchroMode = %d\n", synchroMode);
if ((P_Kin != 0) && (P_Kout != 0)) {
syncFreq = dib8096p_calcSyncFreq(P_Kin, P_Kout,
@@ -1456,7 +1463,7 @@ static void dib8096p_configMpegMux(struct dib8000_state *state,
{
u16 reg_1287;
- dprintk("Enable Mpeg mux");
+ dprintk("Enable Mpeg mux\n");
dib8096p_enMpegMux(state, 0);
@@ -1477,15 +1484,15 @@ static void dib8096p_setDibTxMux(struct dib8000_state *state, int mode)
switch (mode) {
case MPEG_ON_DIBTX:
- dprintk("SET MPEG ON DIBSTREAM TX");
+ dprintk("SET MPEG ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
reg_1288 |= (1 << 9); break;
case DIV_ON_DIBTX:
- dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
reg_1288 |= (1 << 8); break;
case ADC_ON_DIBTX:
- dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
dib8096p_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
reg_1288 |= (1 << 7); break;
default:
@@ -1500,17 +1507,17 @@ static void dib8096p_setHostBusMux(struct dib8000_state *state, int mode)
switch (mode) {
case DEMOUT_ON_HOSTBUS:
- dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
dib8096p_enMpegMux(state, 0);
reg_1288 |= (1 << 6);
break;
case DIBTX_ON_HOSTBUS:
- dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dprintk("SET DIBSTREAM TX ON HOST BUS\n");
dib8096p_enMpegMux(state, 0);
reg_1288 |= (1 << 5);
break;
case MPEG_ON_HOSTBUS:
- dprintk("SET MPEG MUX ON HOST BUS");
+ dprintk("SET MPEG MUX ON HOST BUS\n");
reg_1288 |= (1 << 4);
break;
default:
@@ -1526,7 +1533,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
switch (onoff) {
case 0: /* only use the internal way - not the diversity input */
- dprintk("%s mode OFF : by default Enable Mpeg INPUT",
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT\n",
__func__);
/* outputRate = 8 */
dib8096p_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
@@ -1544,7 +1551,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
break;
case 1: /* both ways */
case 2: /* only the diversity input */
- dprintk("%s ON : Enable diversity INPUT", __func__);
+ dprintk("%s ON : Enable diversity INPUT\n", __func__);
dib8096p_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
state->input_mode_mpeg = 0;
break;
@@ -1576,11 +1583,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
- dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux");
+ dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux\n");
dib8096p_configMpegMux(state, 3, 1, 1);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc");
+ dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (2 << 6) | (0 << 1);
@@ -1589,11 +1596,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
- dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
dib8096p_configMpegMux(state, 2, 0, 0);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else { /* Use Smooth block */
- dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block");
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (0 << 6);
@@ -1601,7 +1608,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
- dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block");
+ dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (1 << 6);
break;
@@ -1609,7 +1616,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_FIFO:
/* Using Smooth block because not supported
by new Mpeg Mux bloc */
- dprintk("dib8096P setting output mode TS_FIFO using Smooth block");
+ dprintk("dib8096P setting output mode TS_FIFO using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5 << 6);
smo_mode |= (3 << 1);
@@ -1617,13 +1624,13 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
break;
case OUTMODE_DIVERSITY:
- dprintk("dib8096P setting output mode MODE_DIVERSITY");
+ dprintk("dib8096P setting output mode MODE_DIVERSITY\n");
dib8096p_setDibTxMux(state, DIV_ON_DIBTX);
dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
- dprintk("dib8096P setting output mode MODE_ANALOG_ADC");
+ dprintk("dib8096P setting output mode MODE_ANALOG_ADC\n");
dib8096p_setDibTxMux(state, ADC_ON_DIBTX);
dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
@@ -1632,7 +1639,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
if (mode != OUTMODE_HIGH_Z)
outreg |= (1<<10);
- dprintk("output_mpeg2_in_188_bytes = %d",
+ dprintk("output_mpeg2_in_188_bytes = %d\n",
state->cfg.output_mpeg2_in_188_bytes);
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
@@ -1678,7 +1685,7 @@ static int dib8096p_tuner_write_serpar(struct i2c_adapter *i2c_adap,
n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("Tuner ITF: write busy (overflow)");
+ dprintk("Tuner ITF: write busy (overflow)\n");
}
dib8000_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
dib8000_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -1699,7 +1706,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (overflow)");
+ dprintk("TunerITF: read busy (overflow)\n");
}
dib8000_write_word(state, 1985, (0<<6) | (serpar_num&0x3f));
@@ -1708,7 +1715,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
n_empty = dib8000_read_word(state, 1984)&0x1;
i--;
if (i == 0)
- dprintk("TunerITF: read busy (empty)");
+ dprintk("TunerITF: read busy (empty)\n");
}
read_word = dib8000_read_word(state, 1987);
@@ -1889,7 +1896,7 @@ static int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff)
struct dib8000_state *state = fe->demodulator_priv;
u16 en_cur_state;
- dprintk("sleep dib8096p: %d", onoff);
+ dprintk("sleep dib8096p: %d\n", onoff);
en_cur_state = dib8000_read_word(state, 1922);
@@ -1958,7 +1965,7 @@ static void dib8000_update_timf(struct dib8000_state *state)
dib8000_write_word(state, 29, (u16) (timf >> 16));
dib8000_write_word(state, 30, (u16) (timf & 0xffff));
- dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
+ dprintk("Updated timing frequency: %d (default: %d)\n", state->timf, state->timf_default);
}
static u32 dib8000_ctrl_timf(struct dvb_frontend *fe, uint8_t op, uint32_t timf)
@@ -2118,7 +2125,7 @@ static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
int sub_channel_prbs_group = 0;
sub_channel_prbs_group = (subchannel / 3) + 1;
- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
+ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
switch (state->fe[0]->dtv_property_cache.transmission_mode) {
case TRANSMISSION_MODE_2K:
@@ -2604,7 +2611,7 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
slist = 0;
}
}
- dprintk("Using list for autosearch : %d", slist);
+ dprintk("Using list for autosearch : %d\n", slist);
dib8000_set_isdbt_common_channel(state, slist, 1);
@@ -2638,17 +2645,17 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
if ((state->revision >= 0x8002) &&
(state->autosearch_state == AS_SEARCHING_FFT)) {
if (irq_pending & 0x1) {
- dprintk("dib8000_autosearch_irq: max correlation result available");
+ dprintk("dib8000_autosearch_irq: max correlation result available\n");
return 3;
}
} else {
if (irq_pending & 0x1) { /* failed */
- dprintk("dib8000_autosearch_irq failed");
+ dprintk("dib8000_autosearch_irq failed\n");
return 1;
}
if (irq_pending & 0x2) { /* succeeded */
- dprintk("dib8000_autosearch_irq succeeded");
+ dprintk("dib8000_autosearch_irq succeeded\n");
return 2;
}
}
@@ -2699,7 +2706,7 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
dds += abs_offset_khz * unit_khz_dds_val;
}
- dprintk("setting a DDS frequency offset of %c%dkHz", invert ? '-' : ' ', dds / unit_khz_dds_val);
+ dprintk("setting a DDS frequency offset of %c%dkHz\n", invert ? '-' : ' ', dds / unit_khz_dds_val);
if (abs_offset_khz <= (state->cfg.pll->internal / ratio)) {
/* Max dds offset is the half of the demod freq */
@@ -2738,7 +2745,7 @@ static void dib8000_set_frequency_offset(struct dib8000_state *state)
}
}
- dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
+ dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d\n", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
/* apply dds offset now */
dib8000_set_dds(state, total_dds_offset_khz);
@@ -2890,7 +2897,7 @@ static u16 dib8000_read_lock(struct dvb_frontend *fe)
static int dib8090p_init_sdram(struct dib8000_state *state)
{
u16 reg = 0;
- dprintk("init sdram");
+ dprintk("init sdram\n");
reg = dib8000_read_word(state, 274) & 0xfff0;
dib8000_write_word(state, 274, reg | 0x7); /* P_dintlv_delay_ram = 7 because of MobileSdram */
@@ -2931,7 +2938,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* Transmission mode is only detected on auto mode, currently
*/
if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
- dprintk("transmission mode auto");
+ dprintk("transmission mode auto\n");
return 0;
}
@@ -2939,7 +2946,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* Guard interval is only detected on auto mode, currently
*/
if (c->guard_interval == GUARD_INTERVAL_AUTO) {
- dprintk("guard interval auto");
+ dprintk("guard interval auto\n");
return 0;
}
@@ -2948,7 +2955,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* layer should be enabled
*/
if (!c->isdbt_layer_enabled) {
- dprintk("no layer modulation specified");
+ dprintk("no layer modulation specified\n");
return 0;
}
@@ -2970,7 +2977,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
if ((c->layer[i].modulation == QAM_AUTO) ||
(c->layer[i].fec == FEC_AUTO)) {
- dprintk("layer %c has either modulation or FEC auto",
+ dprintk("layer %c has either modulation or FEC auto\n",
'A' + i);
return 0;
}
@@ -2981,7 +2988,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
* fallback to auto mode.
*/
if (n_segs == 0 || n_segs > 13) {
- dprintk("number of segments is invalid");
+ dprintk("number of segments is invalid\n");
return 0;
}
@@ -3009,7 +3016,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
#if 0
if (*tune_state < CT_DEMOD_STOP)
- dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu",
+ dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu\n",
state->channel_parameters_set, *tune_state, state->autosearch_state, now);
#endif
@@ -3022,7 +3029,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->status = FE_STATUS_TUNE_PENDING;
state->channel_parameters_set = is_manual_mode(c);
- dprintk("Tuning channel on %s search mode",
+ dprintk("Tuning channel on %s search mode\n",
state->channel_parameters_set ? "manual" : "auto");
dib8000_viterbi_state(state, 0); /* force chan dec in restart */
@@ -3102,7 +3109,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
corm[1] = (dib8000_read_word(state, 596) << 16) | (dib8000_read_word(state, 597));
corm[0] = (dib8000_read_word(state, 598) << 16) | (dib8000_read_word(state, 599));
}
- /* dprintk("corm fft: %u %u %u", corm[0], corm[1], corm[2]); */
+ /* dprintk("corm fft: %u %u %u\n", corm[0], corm[1], corm[2]); */
max_value = 0;
for (find_index = 1 ; find_index < 3 ; find_index++) {
@@ -3122,7 +3129,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->found_nfft = TRANSMISSION_MODE_8K;
break;
}
- /* dprintk("Autosearch FFT has found Mode %d", max_value + 1); */
+ /* dprintk("Autosearch FFT has found Mode %d\n", max_value + 1); */
*tune_state = CT_DEMOD_SEARCH_NEXT;
state->autosearch_state = AS_SEARCHING_GUARD;
@@ -3137,7 +3144,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
state->found_guard = dib8000_read_word(state, 572) & 0x3;
else
state->found_guard = dib8000_read_word(state, 570) & 0x3;
- /* dprintk("guard interval found=%i", state->found_guard); */
+ /* dprintk("guard interval found=%i\n", state->found_guard); */
*tune_state = CT_DEMOD_STEP_3;
break;
@@ -3233,7 +3240,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
/* defines timeout for mpeg lock depending on interleaver length of longest layer */
for (i = 0; i < 3; i++) {
if (c->layer[i].interleaving >= deeper_interleaver) {
- dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
+ dprintk("layer%i: time interleaver = %d\n", i, c->layer[i].interleaving);
if (c->layer[i].segment_count > 0) { /* valid layer */
deeper_interleaver = c->layer[0].interleaving;
state->longest_intlv_layer = i;
@@ -3252,7 +3259,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
locks *= 2;
*timeout = now + msecs_to_jiffies(200 * locks); /* give the mpeg lock 800ms if sram is present */
- dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld",
+ dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld\n",
deeper_interleaver, state->longest_intlv_layer, locks, *timeout);
*tune_state = CT_DEMOD_STEP_10;
@@ -3263,7 +3270,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_10: /* 40 */
locks = dib8000_read_lock(fe);
if (locks&(1<<(7-state->longest_intlv_layer))) { /* mpeg lock : check the longest one */
- dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s",
+ dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s\n",
c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[2].segment_count ? (locks >> 5) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled");
@@ -3283,7 +3290,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
*tune_state = CT_DEMOD_STEP_11;
} else { /* we are done mpeg of the longest interleaver xas not locking but let's try if an other layer has locked in the same time */
if (locks & (0x7 << 5)) {
- dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s",
+ dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s\n",
jiffies_to_msecs(now - *timeout),
c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
@@ -3348,7 +3355,7 @@ static int dib8000_wakeup(struct dvb_frontend *fe)
dib8000_set_power_mode(state, DIB8000_POWER_ALL);
dib8000_set_adc_state(state, DIBX000_ADC_ON);
if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
- dprintk("could not start Slow ADC");
+ dprintk("could not start Slow ADC\n");
if (state->revision == 0x8090)
dib8000_sad_calib(state);
@@ -3401,11 +3408,11 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
if (!(stat & FE_HAS_SYNC))
return 0;
- dprintk("dib8000_get_frontend: TMCC lock");
+ dprintk("dib8000_get_frontend: TMCC lock\n");
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat&FE_HAS_SYNC) {
- dprintk("TMCC lock on the slave%i", index_frontend);
+ dprintk("TMCC lock on the slave%i\n", index_frontend);
/* synchronize the cache with the other frontends */
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
@@ -3437,41 +3444,41 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
switch ((val & 0x30) >> 4) {
case 1:
c->transmission_mode = TRANSMISSION_MODE_2K;
- dprintk("dib8000_get_frontend: transmission mode 2K");
+ dprintk("dib8000_get_frontend: transmission mode 2K\n");
break;
case 2:
c->transmission_mode = TRANSMISSION_MODE_4K;
- dprintk("dib8000_get_frontend: transmission mode 4K");
+ dprintk("dib8000_get_frontend: transmission mode 4K\n");
break;
case 3:
default:
c->transmission_mode = TRANSMISSION_MODE_8K;
- dprintk("dib8000_get_frontend: transmission mode 8K");
+ dprintk("dib8000_get_frontend: transmission mode 8K\n");
break;
}
switch (val & 0x3) {
case 0:
c->guard_interval = GUARD_INTERVAL_1_32;
- dprintk("dib8000_get_frontend: Guard Interval = 1/32 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/32\n");
break;
case 1:
c->guard_interval = GUARD_INTERVAL_1_16;
- dprintk("dib8000_get_frontend: Guard Interval = 1/16 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/16\n");
break;
case 2:
- dprintk("dib8000_get_frontend: Guard Interval = 1/8 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/8\n");
c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- dprintk("dib8000_get_frontend: Guard Interval = 1/4 ");
+ dprintk("dib8000_get_frontend: Guard Interval = 1/4\n");
c->guard_interval = GUARD_INTERVAL_1_4;
break;
}
val = dib8000_read_word(state, 505);
c->isdbt_partial_reception = val & 1;
- dprintk("dib8000_get_frontend: partial_reception = %d ", c->isdbt_partial_reception);
+ dprintk("dib8000_get_frontend: partial_reception = %d\n", c->isdbt_partial_reception);
for (i = 0; i < 3; i++) {
int show;
@@ -3485,7 +3492,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
show = 1;
if (show)
- dprintk("dib8000_get_frontend: Layer %d segments = %d ",
+ dprintk("dib8000_get_frontend: Layer %d segments = %d\n",
i, c->layer[i].segment_count);
val = dib8000_read_word(state, 499 + i) & 0x3;
@@ -3494,7 +3501,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
val = 4;
c->layer[i].interleaving = val;
if (show)
- dprintk("dib8000_get_frontend: Layer %d time_intlv = %d ",
+ dprintk("dib8000_get_frontend: Layer %d time_intlv = %d\n",
i, c->layer[i].interleaving);
val = dib8000_read_word(state, 481 + i);
@@ -3502,27 +3509,27 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
case 1:
c->layer[i].fec = FEC_1_2;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2\n", i);
break;
case 2:
c->layer[i].fec = FEC_2_3;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3\n", i);
break;
case 3:
c->layer[i].fec = FEC_3_4;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4\n", i);
break;
case 5:
c->layer[i].fec = FEC_5_6;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6\n", i);
break;
default:
c->layer[i].fec = FEC_7_8;
if (show)
- dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8 ", i);
+ dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8\n", i);
break;
}
@@ -3531,23 +3538,23 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
case 0:
c->layer[i].modulation = DQPSK;
if (show)
- dprintk("dib8000_get_frontend: Layer %d DQPSK ", i);
+ dprintk("dib8000_get_frontend: Layer %d DQPSK\n", i);
break;
case 1:
c->layer[i].modulation = QPSK;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QPSK ", i);
+ dprintk("dib8000_get_frontend: Layer %d QPSK\n", i);
break;
case 2:
c->layer[i].modulation = QAM_16;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QAM16 ", i);
+ dprintk("dib8000_get_frontend: Layer %d QAM16\n", i);
break;
case 3:
default:
c->layer[i].modulation = QAM_64;
if (show)
- dprintk("dib8000_get_frontend: Layer %d QAM64 ", i);
+ dprintk("dib8000_get_frontend: Layer %d QAM64\n", i);
break;
}
}
@@ -3578,12 +3585,12 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
unsigned long delay, callback_time;
if (c->frequency == 0) {
- dprintk("dib8000: must at least specify frequency ");
+ dprintk("dib8000: must at least specify frequency\n");
return 0;
}
if (c->bandwidth_hz == 0) {
- dprintk("dib8000: no bandwidth specified, set to default ");
+ dprintk("dib8000: no bandwidth specified, set to default\n");
c->bandwidth_hz = 6000000;
}
@@ -3671,7 +3678,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
/* we are in autosearch */
if (state->channel_parameters_set == 0) { /* searching */
if ((dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_DEMOD_SUCCESS) || (dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_FFT_SUCCESS)) {
- dprintk("autosearch succeeded on fe%i", index_frontend);
+ dprintk("autosearch succeeded on fe%i\n", index_frontend);
dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */
state->channel_parameters_set = 1;
@@ -3708,11 +3715,11 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
active = 1;
}
if (active == 0)
- dprintk("tuning done with status %d", dib8000_get_status(state->fe[0]));
+ dprintk("tuning done with status %d\n", dib8000_get_status(state->fe[0]));
}
if ((active == 1) && (callback_time == 0)) {
- dprintk("strange callback time something went wrong");
+ dprintk("strange callback time something went wrong\n");
active = 0;
}
@@ -4172,7 +4179,7 @@ static int dib8000_get_stats(struct dvb_frontend *fe, enum fe_status stat)
time_us = dib8000_get_time_us(fe, -1);
state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
- dprintk("Next all layers stats available in %u us.", time_us);
+ dprintk("Next all layers stats available in %u us.\n", time_us);
dib8000_read_ber(fe, &val);
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -4239,12 +4246,12 @@ static int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_fronte
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
- dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
state->fe[index_frontend] = fe_slave;
return 0;
}
- dprintk("too many slave frontend");
+ dprintk("too many slave frontend\n");
return -ENOMEM;
}
@@ -4256,12 +4263,12 @@ static int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)", state->fe[index_frontend-1], index_frontend-1);
+ dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend-1], index_frontend-1);
state->fe[index_frontend] = NULL;
return 0;
}
- dprintk("no frontend to be removed");
+ dprintk("no frontend to be removed\n");
return -ENODEV;
}
@@ -4283,18 +4290,18 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_write_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
return -ENOMEM;
}
client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_read_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory_read;
}
client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
if (!client.i2c_buffer_lock) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory_lock;
}
@@ -4313,7 +4320,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
dib8000_i2c_write16(&client, 1287, 0x0003);
client.addr = default_addr;
if (dib8000_identify(&client) == 0) {
- dprintk("#%d: not identified", k);
+ dprintk("#%d: not identified\n", k);
ret = -EINVAL;
goto error;
}
@@ -4327,7 +4334,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
client.addr = new_addr;
dib8000_identify(&client);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -4385,14 +4392,14 @@ static int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
u16 val = dib8000_read_word(st, 299) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("pid filter enabled %d", onoff);
+ dprintk("pid filter enabled %d\n", onoff);
return dib8000_write_word(st, 299, val);
}
static int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib8000_state *st = fe->demodulator_priv;
- dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0);
}
@@ -4431,7 +4438,7 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad
struct dvb_frontend *fe;
struct dib8000_state *state;
- dprintk("dib8000_init");
+ dprintk("dib8000_init\n");
state = kzalloc(sizeof(struct dib8000_state), GFP_KERNEL);
if (state == NULL)
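
[Editor's note] The call-site changes above all gain a trailing "\n" because the driver-local dprintk() macro stops appending one itself once it is rebuilt on pr_fmt(). The dib8000 macro itself is changed in an earlier hunk of this patch that is not quoted here; the sketch below mirrors the dib9000/dibx000 definitions further down, and the "DiB8000: " prefix in the old form is an assumption.

/* Before: the macro supplied both the prefix and the newline. */
#define dprintk(args...) do { \
	if (debug) { \
		printk(KERN_DEBUG "DiB8000: "); \
		printk(args); \
		printk("\n"); \
	} \
} while (0)

/* After: pr_fmt() supplies the module prefix, __func__ is added, and each
 * call site now carries its own "\n" - hence the mass of string changes. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define dprintk(fmt, arg...) do { \
	if (debug) \
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
} while (0)
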
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 5897977d2d00..c95fff4f9582 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -7,6 +7,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
@@ -21,7 +24,12 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
#define MAX_NUMBER_OF_FRONTENDS 6
struct i2c_device {
@@ -258,7 +266,7 @@ static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32
state->msg[1].buf = b;
ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
if (ret != 0) {
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
return -EREMOTEIO;
}
@@ -285,7 +293,7 @@ static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
i2c->i2c_write_buffer[1] = reg & 0xff;
if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
- dprintk("read register %x error", reg);
+ dprintk("read register %x error\n", reg);
return 0;
}
@@ -440,7 +448,7 @@ static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u1
return -EIO;
if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd | 0x80);
@@ -456,7 +464,7 @@ static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8
return -EIO;
if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd);
@@ -479,13 +487,13 @@ static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u1
dib9000_write_word(state, 1025 + offs, 0);
dib9000_write_word(state, 1031 + offs, key);
- dprintk("going to download %dB of microcode", len);
+ dprintk("going to download %dB of microcode\n", len);
if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
- dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
+ dprintk("error while downloading microcode for RISC %c\n", 'A' + risc_id);
return -EIO;
}
- dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
+ dprintk("Microcode for RISC %c loaded\n", 'A' + risc_id);
return 0;
}
@@ -511,10 +519,10 @@ static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
} while ((reset_reg & 0x8000) && --tries);
if (reset_reg & 0x8000) {
- dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
+ dprintk("MBX: init ERROR, no response from RISC %c\n", 'A' + risc_id);
return -EIO;
}
- dprintk("MBX: initialized");
+ dprintk("MBX: initialized\n");
return 0;
}
@@ -531,30 +539,27 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
return -EINVAL;
if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
tmp = MAX_MAILBOX_TRY;
do {
size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
- dprintk("MBX: RISC mbx full, retrying");
+ dprintk("MBX: RISC mbx full, retrying\n");
msleep(100);
} else
break;
} while (1);
- /*dprintk( "MBX: size: %d", size); */
+ /*dprintk( "MBX: size: %d\n", size); */
if (tmp == 0) {
ret = -EINVAL;
goto out;
}
#ifdef DUMP_MSG
- dprintk("--> %02x %d ", id, len + 1);
- for (i = 0; i < len; i++)
- dprintk("%04x ", data[i]);
- dprintk("\n");
+ dprintk("--> %02x %d %*ph\n", id, len + 1, len, data);
#endif
/* byte-order conversion - works on big (where it is not necessary) or little endian */
@@ -596,7 +601,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
return 0;
if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
if (risc_id == 1)
@@ -622,13 +627,13 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
}
#ifdef DUMP_MSG
- dprintk("<-- ");
+ dprintk("<--\n");
for (i = 0; i < size + 1; i++)
- dprintk("%04x ", d[i]);
+ dprintk("%04x\n", d[i]);
dprintk("\n");
#endif
} else {
- dprintk("MBX: message is too big for message cache (%d), flushing message", size);
+ dprintk("MBX: message is too big for message cache (%d), flushing message\n", size);
size--; /* Initial word already read */
while (size--)
dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
@@ -649,9 +654,11 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si
b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */
if (*b == '~') {
b++;
- dprintk("%s", b);
+ dprintk("%s\n", b);
} else
- dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
+ dprintk("RISC%d: %d.%04d %s\n",
+ state->fe_id,
+ ts / 10000, ts % 10000, *b ? b : "<empty>");
return 1;
}
@@ -666,7 +673,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
if (*block == 0) {
size = dib9000_mbx_read(state, block, 1, attr);
-/* dprintk( "MBX: fetched %04x message to cache", *block); */
+/* dprintk( "MBX: fetched %04x message to cache\n", *block); */
switch (*block >> 8) {
case IN_MSG_DEBUG_BUF:
@@ -686,7 +693,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
return 1;
}
}
- dprintk("MBX: no free cache-slot found for new message...");
+ dprintk("MBX: no free cache-slot found for new message...\n");
return -1;
}
@@ -706,7 +713,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
return -1;
if (mutex_lock_interruptible(&state->platform.risc.mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -1;
}
@@ -715,7 +722,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
/* if (tmp) */
-/* dprintk( "cleared IRQ: %x", tmp); */
+/* dprintk( "cleared IRQ: %x\n", tmp); */
mutex_unlock(&state->platform.risc.mbx_lock);
return ret;
@@ -750,7 +757,7 @@ static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16
} while (--timeout);
if (timeout == 0) {
- dprintk("waiting for message %d timed out", id);
+ dprintk("waiting for message %d timed out\n", id);
return -1;
}
@@ -770,7 +777,7 @@ static int dib9000_risc_check_version(struct dib9000_state *state)
return -EIO;
fw_version = (r[0] << 8) | r[1];
- dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
+ dprintk("RISC: ver: %d.%02d (IC: %d)\n", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
if ((fw_version >> 10) != 7)
return -EINVAL;
@@ -850,40 +857,40 @@ static u16 dib9000_identify(struct i2c_device *client)
value = dib9000_i2c_read16(client, 896);
if (value != 0x01b3) {
- dprintk("wrong Vendor ID (0x%x)", value);
+ dprintk("wrong Vendor ID (0x%x)\n", value);
return 0;
}
value = dib9000_i2c_read16(client, 897);
if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) {
- dprintk("wrong Device ID (0x%x)", value);
+ dprintk("wrong Device ID (0x%x)\n", value);
return 0;
}
/* protect this driver to be used with 7000PC */
if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
- dprintk("this driver does not work with DiB7000PC");
+ dprintk("this driver does not work with DiB7000PC\n");
return 0;
}
switch (value) {
case 0x4000:
- dprintk("found DiB7000MA/PA/MB/PB");
+ dprintk("found DiB7000MA/PA/MB/PB\n");
break;
case 0x4001:
- dprintk("found DiB7000HC");
+ dprintk("found DiB7000HC\n");
break;
case 0x4002:
- dprintk("found DiB7000MC");
+ dprintk("found DiB7000MC\n");
break;
case 0x4003:
- dprintk("found DiB9000A");
+ dprintk("found DiB9000A\n");
break;
case 0x4004:
- dprintk("found DiB9000H");
+ dprintk("found DiB9000H\n");
break;
case 0x4005:
- dprintk("found DiB9000M");
+ dprintk("found DiB9000M\n");
break;
}
@@ -1013,7 +1020,7 @@ static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address
if (address >= 1024 || !state->platform.risc.fw_is_running)
return -EINVAL;
- /* dprintk( "APB access thru rd fw %d %x", address, attribute); */
+ /* dprintk( "APB access thru rd fw %d %x\n", address, attribute); */
mb[0] = (u16) address;
mb[1] = len / 2;
@@ -1043,7 +1050,7 @@ static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 addres
if (len > 18)
return -EINVAL;
- /* dprintk( "APB access thru wr fw %d %x", address, attribute); */
+ /* dprintk( "APB access thru wr fw %d %x\n", address, attribute); */
mb[0] = (u16)address;
for (i = 0; i + 1 < len; i += 2)
@@ -1191,7 +1198,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
int ret = 0;
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -1534,7 +1541,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
struct dib9000_state *state = fe->demodulator_priv;
u16 outreg, smo_mode;
- dprintk("setting output mode for demod %p to %d", fe, mode);
+ dprintk("setting output mode for demod %p to %d\n", fe, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -1556,7 +1563,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
outreg = 0;
break;
default:
- dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
+ dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->fe[0]);
return -EINVAL;
}
@@ -1590,7 +1597,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
len = 16;
if (dib9000_read_word(state, 790) != 0)
- dprintk("TunerITF: read busy");
+ dprintk("TunerITF: read busy\n");
dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
dib9000_write_word(state, 787, (len / 2) - 1);
@@ -1601,7 +1608,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
i--;
if (i == 0)
- dprintk("TunerITF: read failed");
+ dprintk("TunerITF: read failed\n");
for (i = 0; i < len; i += 2) {
t = dib9000_read_word(state, 785);
@@ -1609,13 +1616,13 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
msg[index_msg].buf[i + 1] = (t) & 0xff;
}
if (dib9000_read_word(state, 790) != 0)
- dprintk("TunerITF: read more data than expected");
+ dprintk("TunerITF: read more data than expected\n");
} else {
i = 1000;
while (dib9000_read_word(state, 789) && i)
i--;
if (i == 0)
- dprintk("TunerITF: write busy");
+ dprintk("TunerITF: write busy\n");
len = msg[index_msg].len;
if (len > 16)
@@ -1631,7 +1638,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
while (dib9000_read_word(state, 791) > 0 && i)
i--;
if (i == 0)
- dprintk("TunerITF: write failed");
+ dprintk("TunerITF: write failed\n");
}
}
return num;
@@ -1676,7 +1683,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
@@ -1759,7 +1766,7 @@ static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
st->gpio_val |= (val & 0x01) << num; /* set the new value */
dib9000_write_word(st, 774, st->gpio_val);
- dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);
+ dprintk("gpio dir: %04x: gpio val: %04x\n", st->gpio_dir, st->gpio_val);
return 0;
}
@@ -1779,7 +1786,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
/* postpone the pid filtering cmd */
- dprintk("pid filter cmd postpone");
+ dprintk("pid filter cmd postpone\n");
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
@@ -1787,14 +1794,14 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
}
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
val = dib9000_read_word(state, 294 + 1) & 0xffef;
val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
+ dprintk("PID filter enabled %d\n", onoff);
ret = dib9000_write_word(state, 294 + 1, val);
mutex_unlock(&state->demod_lock);
return ret;
@@ -1809,7 +1816,7 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
if (state->pid_ctrl_index != -2) {
/* postpone the pid filtering cmd */
- dprintk("pid filter postpone");
+ dprintk("pid filter postpone\n");
if (state->pid_ctrl_index < 9) {
state->pid_ctrl_index++;
state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
@@ -1817,15 +1824,15 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
state->pid_ctrl[state->pid_ctrl_index].pid = pid;
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
} else
- dprintk("can not add any more pid ctrl cmd");
+ dprintk("can not add any more pid ctrl cmd\n");
return 0;
}
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
- dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+ dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
ret = dib9000_write_word(state, 300 + 1 + id,
onoff ? (1 << 13) | pid : 0);
mutex_unlock(&state->demod_lock);
@@ -1868,7 +1875,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
@@ -1899,7 +1906,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
if (state->get_frontend_internal == 0) {
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
}
@@ -1907,7 +1914,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
if (stat & FE_HAS_SYNC) {
- dprintk("TPS lock on the slave%i", index_frontend);
+ dprintk("TPS lock on the slave%i\n", index_frontend);
/* synchronize the cache with the other frontends */
state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
@@ -1995,18 +2002,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check that the correct parameters are set */
if (state->fe[0]->dtv_property_cache.frequency == 0) {
- dprintk("dib9000: must specify frequency ");
+ dprintk("dib9000: must specify frequency\n");
return 0;
}
if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
- dprintk("dib9000: must specify bandwidth ");
+ dprintk("dib9000: must specify bandwidth\n");
return 0;
}
state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
@@ -2073,14 +2080,14 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
- dprintk("tune failed");
+ dprintk("tune failed\n");
mutex_unlock(&state->demod_lock);
/* tune failed; put all the pid filtering cmd to junk */
state->pid_ctrl_index = -1;
return 0;
}
- dprintk("tune success on frontend%i", index_frontend_success);
+ dprintk("tune success on frontend%i\n", index_frontend_success);
/* synchronize all the channel cache */
state->get_frontend_internal = 1;
@@ -2169,7 +2176,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, enum fe_status *stat)
u16 lock = 0, lock_slave = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
@@ -2202,11 +2209,11 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2237,7 +2244,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
*strength = 0;
@@ -2250,7 +2257,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2281,7 +2288,7 @@ static u32 dib9000_get_snr(struct dvb_frontend *fe)
u16 val;
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return 0;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -2320,7 +2327,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
u32 snr_master;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
snr_master = dib9000_get_snr(fe);
@@ -2345,11 +2352,11 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
int ret = 0;
if (mutex_lock_interruptible(&state->demod_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
return -EINTR;
}
if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
- dprintk("could not get the lock");
+ dprintk("could not get the lock\n");
ret = -EINTR;
goto error;
}
@@ -2376,12 +2383,12 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_write_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
return -ENOMEM;
}
client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
if (!client.i2c_read_buffer) {
- dprintk("%s: not enough memory", __func__);
+ dprintk("%s: not enough memory\n", __func__);
ret = -ENOMEM;
goto error_memory;
}
@@ -2408,7 +2415,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
if (dib9000_identify(&client) == 0) {
client.i2c_addr = default_addr;
if (dib9000_identify(&client) == 0) {
- dprintk("DiB9000 #%d: not identified", k);
+ dprintk("DiB9000 #%d: not identified\n", k);
ret = -EIO;
goto error;
}
@@ -2417,7 +2424,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);
- dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+ dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
@@ -2445,12 +2452,12 @@ int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
- dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+ dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
state->fe[index_frontend] = fe_slave;
return 0;
}
- dprintk("too many slave frontend");
+ dprintk("too many slave frontend\n");
return -ENOMEM;
}
EXPORT_SYMBOL(dib9000_set_slave_frontend);
@@ -2463,12 +2470,12 @@ int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
index_frontend++;
if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
+ dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend - 1], index_frontend - 1);
state->fe[index_frontend] = NULL;
return 0;
}
- dprintk("no frontend to be removed");
+ dprintk("no frontend to be removed\n");
return -ENODEV;
}
EXPORT_SYMBOL(dib9000_remove_slave_frontend);
@@ -2483,7 +2490,7 @@ struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int sla
}
EXPORT_SYMBOL(dib9000_get_slave_frontend);
-static struct dvb_frontend_ops dib9000_ops;
+static const struct dvb_frontend_ops dib9000_ops;
struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
{
struct dvb_frontend *fe;
@@ -2560,7 +2567,7 @@ error:
}
EXPORT_SYMBOL(dib9000_attach);
-static struct dvb_frontend_ops dib9000_ops = {
+static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 9000",
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 723358d7ca84..bc28184c7fb0 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -8,14 +10,18 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
int ret;
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -41,7 +47,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
u16 ret;
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return 0;
}
@@ -59,7 +65,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
mst->msg[1].len = 2;
if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
- dprintk("i2c read error on %d", reg);
+ dprintk("i2c read error on %d\n", reg);
ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
mutex_unlock(&mst->i2c_buffer_lock);
@@ -192,7 +198,7 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf)
{
if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
- dprintk("selecting interface: %d", intf);
+ dprintk("selecting interface: %d\n", intf);
mst->selected_interface = intf;
return dibx000_write_word(mst, mst->base_reg + 4, intf);
}
@@ -290,7 +296,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
@@ -337,7 +343,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
@@ -391,7 +397,7 @@ struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
i2c = &mst->master_i2c_adap_gpio67;
break;
default:
- printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
+ pr_err("incorrect I2C interface selected\n");
break;
}
@@ -434,7 +440,7 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
mutex_init(&mst->i2c_buffer_lock);
if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
+ dprintk("could not acquire lock\n");
return -EINVAL;
}
memset(mst->msg, 0, sizeof(struct i2c_msg));
@@ -456,29 +462,25 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
if (i2c_adapter_init
(&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
"DiBX000 tuner I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the tuner i2c_adapter\n");
+ pr_err("could not initialize the tuner i2c_adapter\n");
mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio12, &dibx000_i2c_master_gpio12_xfer_algo,
"DiBX000 master GPIO12 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio34, &dibx000_i2c_master_gpio34_xfer_algo,
"DiBX000 master GPIO34 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent;
if (i2c_adapter_init
(&mst->master_i2c_adap_gpio67, &dibx000_i2c_gated_gpio67_algo,
"DiBX000 master GPIO67 I2C bus", mst) != 0)
- printk(KERN_ERR
- "DiBX000: could not initialize the master i2c_adapter\n");
+ pr_err("could not initialize the master i2c_adapter\n");
/* initialize the i2c-master by closing the gate */
dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
@@ -500,16 +502,6 @@ void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);
-
-u32 systime(void)
-{
- struct timespec t;
-
- t = current_kernel_time();
- return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
-}
-EXPORT_SYMBOL(systime);
-
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dibx000_common.h b/drivers/media/dvb-frontends/dibx000_common.h
index b538e0555c95..61f4152f24ee 100644
--- a/drivers/media/dvb-frontends/dibx000_common.h
+++ b/drivers/media/dvb-frontends/dibx000_common.h
@@ -47,8 +47,6 @@ extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
extern int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed);
-extern u32 systime(void);
-
#define BAND_LBAND 0x01
#define BAND_UHF 0x02
#define BAND_VHF 0x04
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index bd6d2ee0f7c9..f1c3e3b09b65 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12264,7 +12264,7 @@ static void drx39xxj_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops drx39xxj_ops;
+static const struct dvb_frontend_ops drx39xxj_ops;
struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
{
@@ -12363,7 +12363,7 @@ error:
}
EXPORT_SYMBOL(drx39xxj_attach);
-static struct dvb_frontend_ops drx39xxj_ops = {
+static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Micronas DRX39xxj family Frontend",
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 445a15c2714f..4143f0326684 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2912,7 +2912,7 @@ static void drxd_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops drxd_ops = {
+static const struct dvb_frontend_ops drxd_ops = {
.delsys = { SYS_DVBT},
.info = {
.name = "Micronas DRXD DVB-T",
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index c595adc61c6f..146edf344dd8 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6737,7 +6737,7 @@ static int drxk_get_tune_settings(struct dvb_frontend *fe,
}
}
-static struct dvb_frontend_ops drxk_ops = {
+static const struct dvb_frontend_ops drxk_ops = {
/* .delsys will be filled dynamically */
.info = {
.name = "DRXK",
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 447b518e287a..0b17a45c5640 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -248,8 +248,8 @@ static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
- printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
- " value == 0x%02x)\n", __func__, err, reg, data);
+ printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+ __func__, err, reg, data);
return -EREMOTEIO;
}
@@ -296,8 +296,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1) {
- printk(KERN_ERR "%s: write error(err == %i, "
- "reg == 0x%02x\n", __func__, ret, reg);
+ printk(KERN_ERR "%s: write error(err == %i, reg == 0x%02x\n",
+ __func__, ret, reg);
ret = -EREMOTEIO;
goto error;
}
@@ -364,8 +364,8 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- printk(KERN_ERR "%s: No firmware uploaded (timeout or file not "
- "found?)\n", __func__);
+ printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -830,7 +830,7 @@ static void ds3000_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops ds3000_ops;
+static const struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
@@ -1104,7 +1104,7 @@ static int ds3000_initfe(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops ds3000_ops = {
+static const struct dvb_frontend_ops ds3000_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Montage Technology DS3000",
@@ -1144,8 +1144,7 @@ static struct dvb_frontend_ops ds3000_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
- "DS3000 hardware");
+MODULE_DESCRIPTION("DVB Frontend module for Montage Technology DS3000 hardware");
MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
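
[Editor's note] ds3000.c shows the constification pattern most frontends in this series follow: the forward declaration and the definition of the ops table both become const, which is safe because attach() only memcpy()s the table into fe->ops and never writes through the static object. A stripped-down sketch with hypothetical names:

#include <linux/slab.h>
#include "dvb_frontend.h"

struct example_state {
	struct dvb_frontend frontend;
};

static const struct dvb_frontend_ops example_fe_ops;	/* forward declaration */

static struct dvb_frontend *example_fe_attach(void)
{
	struct example_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* The const table is only copied; the per-device copy in fe->ops
	 * stays writable for callers that patch it up after attach. */
	memcpy(&state->frontend.ops, &example_fe_ops, sizeof(example_fe_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;
}

static const struct dvb_frontend_ops example_fe_ops = {
	.delsys = { SYS_DVBS },
	.info = {
		.name = "Example demodulator",
	},
};
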
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 735a96662022..ef976eb23344 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dvb/frontend.h>
@@ -25,6 +27,9 @@
#include "dvb-pll.h"
+#define dprintk(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
struct dvb_pll_priv {
/* pll number */
int nr;
@@ -362,7 +367,7 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
result = i2c_transfer(priv->i2c, &msg, 1);
if (result != 1)
- printk(KERN_ERR "%s: i2c_transfer failed:%d",
+ pr_err("%s: i2c_transfer failed:%d",
__func__, result);
if (b_w <= 10000)
@@ -432,7 +437,7 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
result = i2c_transfer(priv->i2c, &msg, 1);
if (result != 1)
- printk(KERN_ERR "%s: i2c_transfer failed:%d",
+ pr_err("%s: i2c_transfer failed:%d",
__func__, result);
buf[2] = 0x9e;
@@ -578,7 +583,7 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
}
if (debug)
- printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
+ dprintk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
frequency, i, desc->count);
if (i == desc->count)
return -EINVAL;
@@ -594,18 +599,17 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
desc->set(fe, buf);
if (debug)
- printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
+ dprintk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
desc->name, div, buf[0], buf[1], buf[2], buf[3]);
// calculate the frequency we set it to
return (div * desc->entries[i].stepsize) - desc->iffreq;
}
-static int dvb_pll_release(struct dvb_frontend *fe)
+static void dvb_pll_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int dvb_pll_sleep(struct dvb_frontend *fe)
@@ -803,10 +807,10 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
fe->tuner_priv = priv;
if ((debug) || (id[priv->nr] == pll_desc_id)) {
- printk("dvb-pll[%d]", priv->nr);
+ dprintk("dvb-pll[%d]", priv->nr);
if (i2c != NULL)
- printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
- printk(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
+ pr_cont(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
+ pr_cont(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
id[priv->nr] == pll_desc_id ?
"insmod option" : "autodetected");
}
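
[Editor's note] dvb-pll.c keeps its multi-fragment attach message intact after the pr_fmt() conversion by starting the line with the new dprintk() helper and continuing it with pr_cont(), which appends to the current console line instead of opening a new, re-prefixed record. Schematically (assuming the dprintk() macro added in the hunk above; values are placeholders):

static void announce_attach_example(int nr, int adap_id, int pll_addr)
{
	/* First fragment: goes through dprintk(), so it carries the
	 * "module: function: " prefix and starts a new line. */
	dprintk("dvb-pll[%d]", nr);
	/* Continuations: appended to the same line, no prefix, no newline. */
	pr_cont(" %d-%04x", adap_id, pll_addr);
	pr_cont(": id# %d attached\n", 42);
}
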
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index e5bd8c62ad3a..efc3c31a7635 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -119,7 +119,7 @@ static void dvb_dummy_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
{
@@ -136,7 +136,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
{
@@ -153,7 +153,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
{
@@ -170,7 +170,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
return &state->frontend;
}
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Dummy DVB-T",
@@ -201,7 +201,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "Dummy DVB-C",
@@ -230,7 +230,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.read_ucblocks = dvb_dummy_fe_read_ucblocks,
};
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Dummy DVB-S",
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index c9012e677cd1..d97ce21e26e1 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -280,7 +280,7 @@ static void ec100_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops ec100_ops;
+static const struct dvb_frontend_ops ec100_ops;
struct dvb_frontend *ec100_attach(const struct ec100_config *config,
struct i2c_adapter *i2c)
@@ -315,7 +315,7 @@ error:
}
EXPORT_SYMBOL(ec100_attach);
-static struct dvb_frontend_ops ec100_ops = {
+static const struct dvb_frontend_ops ec100_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "E3C EC100 DVB-T",
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index 93f59bfea092..efe015df7f1d 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -323,7 +323,7 @@ static void gp8psk_fe_release(struct dvb_frontend* fe)
kfree(st);
}
-static struct dvb_frontend_ops gp8psk_fe_ops;
+static const struct dvb_frontend_ops gp8psk_fe_ops;
struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
void *priv, bool is_rev1)
@@ -351,7 +351,7 @@ struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
}
EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
-static struct dvb_frontend_ops gp8psk_fe_ops = {
+static const struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Genpix DVB-S",
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
index 1c7eb477e2cd..8b53633cf325 100644
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ b/drivers/media/dvb-frontends/hd29l2.c
@@ -793,7 +793,7 @@ static void hd29l2_release(struct dvb_frontend *fe)
kfree(priv);
}
-static struct dvb_frontend_ops hd29l2_ops;
+static const struct dvb_frontend_ops hd29l2_ops;
struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
struct i2c_adapter *i2c)
@@ -828,7 +828,7 @@ err:
}
EXPORT_SYMBOL(hd29l2_attach);
-static struct dvb_frontend_ops hd29l2_ops = {
+static const struct dvb_frontend_ops hd29l2_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "HDIC HD29L2 DMB-TH",
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index dc43c5f6d0ea..ef35c2b30ea3 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -434,14 +434,13 @@ static int helene_init(struct dvb_frontend *fe)
return helene_leave_power_save(priv);
}
-static int helene_release(struct dvb_frontend *fe)
+static void helene_release(struct dvb_frontend *fe)
{
struct helene_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int helene_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 0c089b5986a1..94bb4f7a2298 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -151,14 +151,13 @@ static int horus3a_init(struct dvb_frontend *fe)
return 0;
}
-static int horus3a_release(struct dvb_frontend *fe)
+static void horus3a_release(struct dvb_frontend *fe)
{
struct horus3a_priv *priv = fe->tuner_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int horus3a_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index cadcae4cff89..475525134327 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -348,11 +348,10 @@ static int itd1000_sleep(struct dvb_frontend *fe)
return 0;
}
-static int itd1000_release(struct dvb_frontend *fe)
+static void itd1000_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops itd1000_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 2826bbb36b73..ca371680a69f 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -94,14 +94,13 @@ static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
return 0;
}
-static int ix2505v_release(struct dvb_frontend *fe)
+static void ix2505v_release(struct dvb_frontend *fe)
{
struct ix2505v_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
/**
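
[Editor's note] helene, horus3a, itd1000, ix2505v and dvb-pll above all drop the "return 0;" from their release callbacks, reflecting the tuner/frontend .release hooks becoming void in this series. The resulting callback reduces to:

static void example_tuner_release(struct dvb_frontend *fe)
{
	/* Free the per-tuner state and clear the pointer; nothing to return. */
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
}
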
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 2f3d0519e19b..68923c84679a 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -496,7 +496,7 @@ static void l64781_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops l64781_ops;
+static const struct dvb_frontend_ops l64781_ops;
struct dvb_frontend* l64781_attach(const struct l64781_config* config,
struct i2c_adapter* i2c)
@@ -571,7 +571,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops l64781_ops = {
+static const struct dvb_frontend_ops l64781_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index f51a3a0b3949..3b31e5f20f46 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1359,7 +1359,7 @@ static void lg216x_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lg2160_ops = {
+static const struct dvb_frontend_ops lg2160_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2160 ATSC/MH Frontend",
@@ -1387,7 +1387,7 @@ static struct dvb_frontend_ops lg2160_ops = {
.release = lg216x_release,
};
-static struct dvb_frontend_ops lg2161_ops = {
+static const struct dvb_frontend_ops lg2161_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2161 ATSC/MH Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 4503e8852fd1..9f5d9380bf5f 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1103,8 +1103,8 @@ static void lgdt3305_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3304_ops;
-static struct dvb_frontend_ops lgdt3305_ops;
+static const struct dvb_frontend_ops lgdt3304_ops;
+static const struct dvb_frontend_ops lgdt3305_ops;
struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
struct i2c_adapter *i2c_adap)
@@ -1164,7 +1164,7 @@ fail:
}
EXPORT_SYMBOL(lgdt3305_attach);
-static struct dvb_frontend_ops lgdt3304_ops = {
+static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
@@ -1187,7 +1187,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
.release = lgdt3305_release,
};
-static struct dvb_frontend_ops lgdt3305_ops = {
+static const struct dvb_frontend_ops lgdt3305_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 0ca4e810e9d8..19dca46b1171 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1767,7 +1767,7 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3306a_ops;
+static const struct dvb_frontend_ops lgdt3306a_ops;
struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
struct i2c_adapter *i2c_adap)
@@ -2103,7 +2103,7 @@ static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state)
-static struct dvb_frontend_ops lgdt3306a_ops = {
+static const struct dvb_frontend_ops lgdt3306a_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 96bf254da21e..2f4a0316f89c 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -405,8 +405,7 @@ static int lgdt330x_set_parameters(struct dvb_frontend *fe)
return -1;
}
if (err < 0)
- printk(KERN_WARNING "lgdt330x: %s: error blasting "
- "bytes to lgdt3303 for modulation type(%d)\n",
+ printk(KERN_WARNING "lgdt330x: %s: error blasting bytes to lgdt3303 for modulation type(%d)\n",
__func__, p->modulation);
/*
@@ -729,8 +728,8 @@ static void lgdt330x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops lgdt3302_ops;
-static struct dvb_frontend_ops lgdt3303_ops;
+static const struct dvb_frontend_ops lgdt3302_ops;
+static const struct dvb_frontend_ops lgdt3303_ops;
struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
struct i2c_adapter* i2c)
@@ -775,7 +774,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops lgdt3302_ops = {
+static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3302 VSB/QAM Frontend",
@@ -798,7 +797,7 @@ static struct dvb_frontend_ops lgdt3302_ops = {
.release = lgdt330x_release,
};
-static struct dvb_frontend_ops lgdt3303_ops = {
+static const struct dvb_frontend_ops lgdt3303_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3303 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index fbfd87b5b803..970e42fdbc1b 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -376,7 +376,7 @@ lgs8gl5_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops lgs8gl5_ops;
+static const struct dvb_frontend_ops lgs8gl5_ops;
struct dvb_frontend*
@@ -412,7 +412,7 @@ error:
EXPORT_SYMBOL(lgs8gl5_attach);
-static struct dvb_frontend_ops lgs8gl5_ops = {
+static const struct dvb_frontend_ops lgs8gl5_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS-8GL5 DMB-TH",
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 919daeb96747..6d2e62469d58 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -985,7 +985,7 @@ static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
return lgs8gxx_write_reg(priv, 0x01, 0);
}
-static struct dvb_frontend_ops lgs8gxx_ops = {
+static const struct dvb_frontend_ops lgs8gxx_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0fe5bc9dbce..50bce68ffd66 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -16,7 +16,7 @@
#include "m88ds3103_priv.h"
-static struct dvb_frontend_ops m88ds3103_ops;
+static const struct dvb_frontend_ops m88ds3103_ops;
/* write single register with mask */
static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
@@ -1295,7 +1295,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
}
EXPORT_SYMBOL(m88ds3103_attach);
-static struct dvb_frontend_ops m88ds3103_ops = {
+static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology M88DS3103",
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index ef79a4ec31e2..ce6c21d405ee 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -75,8 +75,8 @@ static int m88rs2000_writereg(struct m88rs2000_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -618,10 +618,9 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
state->no_lock_count = 0;
if (c->delivery_system != SYS_DVBS) {
- deb_info("%s: unsupported delivery "
- "system selected (%d)\n",
- __func__, c->delivery_system);
- return -EOPNOTSUPP;
+ deb_info("%s: unsupported delivery system selected (%d)\n",
+ __func__, c->delivery_system);
+ return -EOPNOTSUPP;
}
/* Set Tuner */
@@ -753,7 +752,7 @@ static void m88rs2000_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops m88rs2000_ops = {
+static const struct dvb_frontend_ops m88rs2000_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "M88RS2000 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 79bc671e8769..9bb122c39c1b 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1816,7 +1816,7 @@ static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static struct dvb_frontend_ops mb86a16_ops = {
+static const struct dvb_frontend_ops mb86a16_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Fujitsu MB86A16 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fe79358b035e..e8ac8c3e2ec0 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1967,6 +1967,7 @@ static int mb86a20s_read_status_and_stats(struct dvb_frontend *fe,
if (status_nr < 0) {
dev_err(&state->i2c->dev,
"%s: Can't read frontend lock status\n", __func__);
+ rc = status_nr;
goto error;
}
@@ -2059,7 +2060,7 @@ static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
-static struct dvb_frontend_ops mb86a20s_ops;
+static const struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
struct i2c_adapter *i2c)
@@ -2107,7 +2108,7 @@ error:
}
EXPORT_SYMBOL(mb86a20s_attach);
-static struct dvb_frontend_ops mb86a20s_ops = {
+static const struct dvb_frontend_ops mb86a20s_ops = {
.delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
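The one-line mb86a20s change above makes the failed lock-status read actually propagate: status_nr is copied into rc before jumping to the error label, so the caller receives the I2C error instead of whatever rc happened to hold at that point. A self-contained sketch of the pattern, where read_status_register() is a hypothetical stand-in for the failing helper:

#include <stdio.h>
#include <errno.h>

/* hypothetical stand-in for the failing register read */
static int read_status_register(void)
{
        return -EIO;
}

static int do_work(void)
{
        int rc = 0;
        int status = read_status_register();

        if (status < 0) {
                rc = status;    /* propagate the failure instead of masking it */
                goto error;
        }

        /* ... use status ... */
        return 0;
error:
        return rc;
}

int main(void)
{
        printf("do_work() = %d\n", do_work());  /* -5 (-EIO) on Linux */
        return 0;
}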
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 18fb2df1e2bd..29dd13b36c28 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -411,7 +411,7 @@ err:
return ret;
}
-static struct dvb_frontend_ops mn88472_ops = {
+static const struct dvb_frontend_ops mn88472_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Panasonic MN88472",
@@ -488,18 +488,6 @@ static int mn88472_probe(struct i2c_client *client,
goto err_kfree;
}
- /* Check demod answers with correct chip id */
- ret = regmap_read(dev->regmap[0], 0xff, &utmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- dev_dbg(&client->dev, "chip id=%02x\n", utmp);
-
- if (utmp != 0x02) {
- ret = -ENODEV;
- goto err_regmap_0_regmap_exit;
- }
-
/*
* Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -536,6 +524,18 @@ static int mn88472_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[2], 0xff, &utmp);
+ if (ret)
+ goto err_regmap_2_regmap_exit;
+
+ dev_dbg(&client->dev, "chip id=%02x\n", utmp);
+
+ if (utmp != 0x02) {
+ ret = -ENODEV;
+ goto err_regmap_2_regmap_exit;
+ }
+
/* Sleep because chip is active by default */
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 451974a1d7ed..c221c7d2ac3e 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -239,62 +239,68 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int uitmp;
+ int ret, i, stmp;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[5];
if (!dev->active) {
ret = -EAGAIN;
goto err;
}
- *status = 0;
-
+ /* Lock detection */
switch (c->delivery_system) {
case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x62, &uitmp);
+ ret = regmap_read(dev->regmap[0], 0x62, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0xa0)) {
- if ((uitmp & 0x0f) >= 0x09)
+ if (!(utmp & 0xa0)) {
+ if ((utmp & 0x0f) >= 0x09)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
- else if ((uitmp & 0x0f) >= 0x03)
+ else if ((utmp & 0x0f) >= 0x03)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ } else {
+ *status = 0;
}
break;
case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x8b, &uitmp);
+ ret = regmap_read(dev->regmap[2], 0x8b, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0x40)) {
- if ((uitmp & 0x0f) >= 0x0d)
+ if (!(utmp & 0x40)) {
+ if ((utmp & 0x0f) >= 0x0d)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
- else if ((uitmp & 0x0f) >= 0x0a)
+ else if ((utmp & 0x0f) >= 0x0a)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
- else if ((uitmp & 0x0f) >= 0x07)
+ else if ((utmp & 0x0f) >= 0x07)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ } else {
+ *status = 0;
}
break;
case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x85, &uitmp);
+ ret = regmap_read(dev->regmap[1], 0x85, &utmp);
if (ret)
goto err;
- if (!(uitmp & 0x40)) {
- ret = regmap_read(dev->regmap[1], 0x89, &uitmp);
+ if (!(utmp & 0x40)) {
+ ret = regmap_read(dev->regmap[1], 0x89, &utmp);
if (ret)
goto err;
- if (uitmp & 0x01)
+ if (utmp & 0x01)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC |
- FE_HAS_LOCK;
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ } else {
+ *status = 0;
}
break;
default:
@@ -302,6 +308,148 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
}
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ for (i = 0; i < 2; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0x86 + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ /* AGCRD[15:6] gives us a 10bit value ([5:0] are always 0) */
+ utmp1 = buf[0] << 8 | buf[1] << 0 | buf[0] >> 2;
+ dev_dbg(&client->dev, "strength=%u\n", utmp1);
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = utmp1;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) {
+ /* DVB-T CNR */
+ ret = regmap_bulk_read(dev->regmap[0], 0x8f, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = buf[0] << 8 | buf[1] << 0;
+ if (utmp) {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = div_u64(((u64)80807124 - intlog10(utmp)
+ + 3355443) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u\n", stmp, utmp);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBT2) {
+ /* DVB-T2 CNR */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0xb7 + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ utmp = buf[1] << 8 | buf[2] << 0;
+ utmp1 = (buf[0] >> 2) & 0x01; /* 0=SISO, 1=MISO */
+ if (utmp) {
+ if (utmp1) {
+ /* CNR[dB]: 10 * (log10(16384 / value) - 0.6) */
+ /* log10(16384) = 70706234, 0.6 = 10066330 */
+ stmp = div_u64(((u64)70706234 - intlog10(utmp)
+ - 10066330) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u MISO\n",
+ stmp, utmp);
+ } else {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = div_u64(((u64)80807124 - intlog10(utmp)
+ + 3355443) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d value=%u SISO\n",
+ stmp, utmp);
+ }
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBC_ANNEX_A) {
+ /* DVB-C CNR */
+ ret = regmap_bulk_read(dev->regmap[1], 0xa1, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0; /* signal */
+ utmp2 = buf[2] << 8 | buf[3] << 0; /* noise */
+ if (utmp1 && utmp2) {
+ /* CNR[dB]: 10 * log10(8 * (signal / noise)) */
+ /* log10(8) = 15151336 */
+ stmp = div_u64(((u64)15151336 + intlog10(utmp1)
+ - intlog10(utmp2)) * 10000, 1 << 24);
+ dev_dbg(&client->dev, "cnr=%d signal=%u noise=%u\n",
+ stmp, utmp1, utmp2);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* BER */
+ if (*status & FE_HAS_LOCK && (c->delivery_system == SYS_DVBT ||
+ c->delivery_system == SYS_DVBC_ANNEX_A)) {
+ /* DVB-T & DVB-C BER */
+ ret = regmap_bulk_read(dev->regmap[0], 0x92, buf, 5);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 16 | buf[1] << 8 | buf[2] << 0;
+ utmp2 = buf[3] << 8 | buf[4] << 0;
+ utmp2 = utmp2 * 8 * 204;
+ dev_dbg(&client->dev, "post_bit_error=%u post_bit_count=%u\n",
+ utmp1, utmp2);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue += utmp1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue += utmp2;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* PER */
+ if (*status & FE_HAS_LOCK) {
+ ret = regmap_bulk_read(dev->regmap[0], 0xdd, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0;
+ utmp2 = buf[2] << 8 | buf[3] << 0;
+ dev_dbg(&client->dev, "block_error=%u block_count=%u\n",
+ utmp1, utmp2);
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue += utmp1;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += utmp2;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -312,6 +460,7 @@ static int mn88473_init(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, len, remain;
unsigned int uitmp;
const struct firmware *fw;
@@ -378,6 +527,20 @@ warm:
dev->active = true;
+ /* init stats here to indicate which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return 0;
err_release_firmware:
release_firmware(fw);
@@ -485,18 +648,6 @@ static int mn88473_probe(struct i2c_client *client,
goto err_kfree;
}
- /* Check demod answers with correct chip id */
- ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
- if (ret)
- goto err_regmap_0_regmap_exit;
-
- dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
-
- if (uitmp != 0x03) {
- ret = -ENODEV;
- goto err_regmap_0_regmap_exit;
- }
-
/*
* Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -533,6 +684,18 @@ static int mn88473_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[2], 0xff, &uitmp);
+ if (ret)
+ goto err_regmap_2_regmap_exit;
+
+ dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
+
+ if (uitmp != 0x03) {
+ ret = -ENODEV;
+ goto err_regmap_2_regmap_exit;
+ }
+
/* Sleep because chip is active by default */
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
diff --git a/drivers/media/dvb-frontends/mn88473_priv.h b/drivers/media/dvb-frontends/mn88473_priv.h
index e6c65893e451..5fc463d147c8 100644
--- a/drivers/media/dvb-frontends/mn88473_priv.h
+++ b/drivers/media/dvb-frontends/mn88473_priv.h
@@ -18,7 +18,9 @@
#define MN88473_PRIV_H
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "mn88473.h"
+#include <linux/math64.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
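The statistics code added to mn88473_read_status() relies on the two headers pulled in above: dvb_math.h provides intlog10(), which returns log10(x) as a fixed-point value scaled by 2^24, and linux/math64.h provides div_u64(). The magic constants use the same scale (80807124 ≈ log10(65536)·2^24, 70706234 ≈ log10(16384)·2^24, 15151336 ≈ log10(8)·2^24, 3355443 ≈ 0.2·2^24, 10066330 ≈ 0.6·2^24), and multiplying by 10000 before dividing by 2^24 leaves the result in the 0.001 dB units that the DVBv5 svalue field expects. The BER block converts the packet counter to a bit count with ×204×8, the size in bits of an RS(204,188) transport-stream packet. A standalone sketch of the DVB-T branch, with intlog10() approximated in floating point purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* floating-point stand-in for the kernel's fixed-point intlog10() */
static uint64_t intlog10_approx(unsigned int value)
{
        return (uint64_t)(log10((double)value) * (1 << 24));
}

/* DVB-T branch above: CNR[dB] = 10 * (log10(65536 / value) + 0.2),
 * returned in 0.001 dB steps as the DVBv5 svalue expects */
static int64_t dvbt_cnr_mdb(unsigned int value)
{
        if (!value)
                return 0;

        return ((80807124ULL - intlog10_approx(value) + 3355443ULL) * 10000)
                >> 24;
}

int main(void)
{
        /* register value 656 -> roughly 22 dB (21996 mdB) */
        printf("cnr = %lld mdB\n", (long long)dvbt_cnr_mdb(656));
        return 0;       /* build with: cc cnr.c -lm */
}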
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index fc08429c99b7..961b9a2508e0 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -457,8 +457,8 @@ static int mt312_read_status(struct dvb_frontend *fe, enum fe_status *s)
if (ret < 0)
return ret;
- dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x,"
- " FEC_STATUS: 0x%02x\n", status[0], status[1], status[2]);
+ dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x, FEC_STATUS: 0x%02x\n",
+ status[0], status[1], status[2]);
if (status[0] & 0xc0)
*s |= FE_HAS_SIGNAL; /* signal noise ratio */
@@ -748,7 +748,7 @@ static void mt312_release(struct dvb_frontend *fe)
}
#define MT312_SYS_CLK 90000000UL /* 90 MHz */
-static struct dvb_frontend_ops mt312_ops = {
+static const struct dvb_frontend_ops mt312_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Zarlink ???? DVB-S",
@@ -827,8 +827,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config,
state->freq_mult = 9;
break;
default:
- printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313"
- " are supported chips.\n");
+ printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313 are supported chips.\n");
goto error;
}
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index c0bb6328956b..48ea0408f02a 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -538,7 +538,7 @@ static void mt352_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops mt352_ops;
+static const struct dvb_frontend_ops mt352_ops;
struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct i2c_adapter* i2c)
@@ -566,7 +566,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops mt352_ops = {
+static const struct dvb_frontend_ops mt352_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink MT352 DVB-T",
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 79c3040912ab..2fe40372ca07 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -289,8 +289,7 @@ static void nxt200x_microcontroller_stop (struct nxt200x_state* state)
counter++;
}
- pr_warn("Timeout waiting for nxt200x to stop. This is ok after "
- "firmware upload.\n");
+ pr_warn("Timeout waiting for nxt200x to stop. This is ok after firmware upload.\n");
return;
}
@@ -893,8 +892,8 @@ static int nxt2002_init(struct dvb_frontend* fe)
state->i2c->dev.parent);
pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- pr_err("%s: No firmware uploaded (timeout or file not found?)"
- "\n", __func__);
+ pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -960,8 +959,8 @@ static int nxt2004_init(struct dvb_frontend* fe)
state->i2c->dev.parent);
pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
- pr_err("%s: No firmware uploaded (timeout or file not found?)"
- "\n", __func__);
+ pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+ __func__);
return ret;
}
@@ -1150,7 +1149,7 @@ static void nxt200x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops nxt200x_ops;
+static const struct dvb_frontend_ops nxt200x_ops;
struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
struct i2c_adapter* i2c)
@@ -1213,7 +1212,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops nxt200x_ops = {
+static const struct dvb_frontend_ops nxt200x_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Nextwave NXT200X VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 73f9505367ac..1ce5ea28489b 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -19,6 +19,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -39,7 +41,11 @@ struct nxt6000_state {
};
static int debug;
-#define dprintk if (debug) printk
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
{
@@ -215,119 +221,129 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
{
u8 val;
-/*
- printk("RS_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, RS_COR_STAT));
- printk("VIT_SYNC_STATUS: 0x%02X\n", nxt6000_readreg(fe, VIT_SYNC_STATUS));
- printk("OFDM_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_COR_STAT));
- printk("OFDM_SYR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_SYR_STAT));
- printk("OFDM_TPS_RCVD_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
- printk("OFDM_TPS_RCVD_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
- printk("OFDM_TPS_RCVD_3: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
- printk("OFDM_TPS_RCVD_4: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
- printk("OFDM_TPS_RESERVED_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
- printk("OFDM_TPS_RESERVED_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
-*/
- printk("NXT6000 status:");
+#if 0
+ pr_info("RS_COR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, RS_COR_STAT));
+ pr_info("VIT_SYNC_STATUS: 0x%02X\n",
+ nxt6000_readreg(fe, VIT_SYNC_STATUS));
+ pr_info("OFDM_COR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_COR_STAT));
+ pr_info("OFDM_SYR_STAT: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_SYR_STAT));
+ pr_info("OFDM_TPS_RCVD_1: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
+ pr_info("OFDM_TPS_RCVD_2: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
+ pr_info("OFDM_TPS_RCVD_3: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
+ pr_info("OFDM_TPS_RCVD_4: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
+ pr_info("OFDM_TPS_RESERVED_1: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
+ pr_info("OFDM_TPS_RESERVED_2: 0x%02X\n",
+ nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
+#endif
+ pr_info("NXT6000 status:");
val = nxt6000_readreg(state, RS_COR_STAT);
- printk(" DATA DESCR LOCK: %d,", val & 0x01);
- printk(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
+ pr_cont(" DATA DESCR LOCK: %d,", val & 0x01);
+ pr_cont(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
val = nxt6000_readreg(state, VIT_SYNC_STATUS);
- printk(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
+ pr_cont(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
switch ((val >> 4) & 0x07) {
case 0x00:
- printk(" VITERBI CODERATE: 1/2,");
+ pr_cont(" VITERBI CODERATE: 1/2,");
break;
case 0x01:
- printk(" VITERBI CODERATE: 2/3,");
+ pr_cont(" VITERBI CODERATE: 2/3,");
break;
case 0x02:
- printk(" VITERBI CODERATE: 3/4,");
+ pr_cont(" VITERBI CODERATE: 3/4,");
break;
case 0x03:
- printk(" VITERBI CODERATE: 5/6,");
+ pr_cont(" VITERBI CODERATE: 5/6,");
break;
case 0x04:
- printk(" VITERBI CODERATE: 7/8,");
+ pr_cont(" VITERBI CODERATE: 7/8,");
break;
default:
- printk(" VITERBI CODERATE: Reserved,");
+ pr_cont(" VITERBI CODERATE: Reserved,");
}
val = nxt6000_readreg(state, OFDM_COR_STAT);
- printk(" CHCTrack: %d,", (val >> 7) & 0x01);
- printk(" TPSLock: %d,", (val >> 6) & 0x01);
- printk(" SYRLock: %d,", (val >> 5) & 0x01);
- printk(" AGCLock: %d,", (val >> 4) & 0x01);
+ pr_cont(" CHCTrack: %d,", (val >> 7) & 0x01);
+ pr_cont(" TPSLock: %d,", (val >> 6) & 0x01);
+ pr_cont(" SYRLock: %d,", (val >> 5) & 0x01);
+ pr_cont(" AGCLock: %d,", (val >> 4) & 0x01);
switch (val & 0x0F) {
case 0x00:
- printk(" CoreState: IDLE,");
+ pr_cont(" CoreState: IDLE,");
break;
case 0x02:
- printk(" CoreState: WAIT_AGC,");
+ pr_cont(" CoreState: WAIT_AGC,");
break;
case 0x03:
- printk(" CoreState: WAIT_SYR,");
+ pr_cont(" CoreState: WAIT_SYR,");
break;
case 0x04:
- printk(" CoreState: WAIT_PPM,");
+ pr_cont(" CoreState: WAIT_PPM,");
break;
case 0x01:
- printk(" CoreState: WAIT_TRL,");
+ pr_cont(" CoreState: WAIT_TRL,");
break;
case 0x05:
- printk(" CoreState: WAIT_TPS,");
+ pr_cont(" CoreState: WAIT_TPS,");
break;
case 0x06:
- printk(" CoreState: MONITOR_TPS,");
+ pr_cont(" CoreState: MONITOR_TPS,");
break;
default:
- printk(" CoreState: Reserved,");
+ pr_cont(" CoreState: Reserved,");
}
val = nxt6000_readreg(state, OFDM_SYR_STAT);
- printk(" SYRLock: %d,", (val >> 4) & 0x01);
- printk(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
+ pr_cont(" SYRLock: %d,", (val >> 4) & 0x01);
+ pr_cont(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
switch ((val >> 4) & 0x03) {
case 0x00:
- printk(" SYRGuard: 1/32,");
+ pr_cont(" SYRGuard: 1/32,");
break;
case 0x01:
- printk(" SYRGuard: 1/16,");
+ pr_cont(" SYRGuard: 1/16,");
break;
case 0x02:
- printk(" SYRGuard: 1/8,");
+ pr_cont(" SYRGuard: 1/8,");
break;
case 0x03:
- printk(" SYRGuard: 1/4,");
+ pr_cont(" SYRGuard: 1/4,");
break;
}
@@ -336,77 +352,77 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
switch ((val >> 4) & 0x07) {
case 0x00:
- printk(" TPSLP: 1/2,");
+ pr_cont(" TPSLP: 1/2,");
break;
case 0x01:
- printk(" TPSLP: 2/3,");
+ pr_cont(" TPSLP: 2/3,");
break;
case 0x02:
- printk(" TPSLP: 3/4,");
+ pr_cont(" TPSLP: 3/4,");
break;
case 0x03:
- printk(" TPSLP: 5/6,");
+ pr_cont(" TPSLP: 5/6,");
break;
case 0x04:
- printk(" TPSLP: 7/8,");
+ pr_cont(" TPSLP: 7/8,");
break;
default:
- printk(" TPSLP: Reserved,");
+ pr_cont(" TPSLP: Reserved,");
}
switch (val & 0x07) {
case 0x00:
- printk(" TPSHP: 1/2,");
+ pr_cont(" TPSHP: 1/2,");
break;
case 0x01:
- printk(" TPSHP: 2/3,");
+ pr_cont(" TPSHP: 2/3,");
break;
case 0x02:
- printk(" TPSHP: 3/4,");
+ pr_cont(" TPSHP: 3/4,");
break;
case 0x03:
- printk(" TPSHP: 5/6,");
+ pr_cont(" TPSHP: 5/6,");
break;
case 0x04:
- printk(" TPSHP: 7/8,");
+ pr_cont(" TPSHP: 7/8,");
break;
default:
- printk(" TPSHP: Reserved,");
+ pr_cont(" TPSHP: Reserved,");
}
val = nxt6000_readreg(state, OFDM_TPS_RCVD_4);
- printk(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
+ pr_cont(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
switch ((val >> 4) & 0x03) {
case 0x00:
- printk(" TPSGuard: 1/32,");
+ pr_cont(" TPSGuard: 1/32,");
break;
case 0x01:
- printk(" TPSGuard: 1/16,");
+ pr_cont(" TPSGuard: 1/16,");
break;
case 0x02:
- printk(" TPSGuard: 1/8,");
+ pr_cont(" TPSGuard: 1/8,");
break;
case 0x03:
- printk(" TPSGuard: 1/4,");
+ pr_cont(" TPSGuard: 1/4,");
break;
}
@@ -416,8 +432,8 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
val = nxt6000_readreg(state, RF_AGC_STATUS);
val = nxt6000_readreg(state, RF_AGC_STATUS);
- printk(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
- printk("\n");
+ pr_cont(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
+ pr_cont("\n");
}
static int nxt6000_read_status(struct dvb_frontend *fe, enum fe_status *status)
@@ -548,7 +564,7 @@ static int nxt6000_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
}
}
-static struct dvb_frontend_ops nxt6000_ops;
+static const struct dvb_frontend_ops nxt6000_ops;
struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct i2c_adapter* i2c)
@@ -576,7 +592,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops nxt6000_ops = {
+static const struct dvb_frontend_ops nxt6000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NxtWave NXT6000 DVB-T",
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index a165af990672..17bdadd7d0e1 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -342,15 +342,13 @@ static int or51132_set_parameters(struct dvb_frontend *fe)
fwname);
ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
if (ret) {
- printk(KERN_WARNING "or51132: No firmware up"
- "loaded(timeout or file not found?)\n");
+ printk(KERN_WARNING "or51132: No firmware uploaded(timeout or file not found?)\n");
return ret;
}
ret = or51132_load_firmware(fe, fw);
release_firmware(fw);
if (ret) {
- printk(KERN_WARNING "or51132: Writing firmware to "
- "device failed!\n");
+ printk(KERN_WARNING "or51132: Writing firmware to device failed!\n");
return ret;
}
printk("or51132: Firmware upload complete.\n");
@@ -561,7 +559,7 @@ static void or51132_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops or51132_ops;
+static const struct dvb_frontend_ops or51132_ops;
struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct i2c_adapter* i2c)
@@ -585,7 +583,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops or51132_ops = {
+static const struct dvb_frontend_ops or51132_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51132 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index e82413b975e6..27eb73aa4f62 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -377,8 +377,7 @@ static int or51211_init(struct dvb_frontend* fe)
OR51211_DEFAULT_FIRMWARE);
pr_info("Got Hotplug firmware\n");
if (ret) {
- pr_warn("No firmware uploaded "
- "(timeout or file not found?)\n");
+ pr_warn("No firmware uploaded (timeout or file not found?)\n");
return ret;
}
@@ -508,7 +507,7 @@ static void or51211_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops or51211_ops;
+static const struct dvb_frontend_ops or51211_ops;
struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct i2c_adapter* i2c)
@@ -532,7 +531,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops or51211_ops = {
+static const struct dvb_frontend_ops or51211_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51211 VSB Frontend",
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 87226056f226..7bbfe11d11ed 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -548,7 +548,7 @@ static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static struct dvb_frontend_ops rtl2830_ops = {
+static const struct dvb_frontend_ops rtl2830_ops = {
.delsys = {SYS_DVBT},
.info = {
.name = "Realtek RTL2830 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 0ced01f1012e..94bf5b7d6f3f 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -837,7 +837,7 @@ static int rtl2832_deselect(struct i2c_mux_core *muxc, u32 chan_id)
return 0;
}
-static struct dvb_frontend_ops rtl2832_ops = {
+static const struct dvb_frontend_ops rtl2832_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Realtek RTL2832 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index c68965ad97c0..f370c6df0a8b 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -321,8 +321,8 @@ static int s5h1409_writereg(struct s5h1409_state *state, u8 reg, u16 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -949,7 +949,7 @@ static void s5h1409_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1409_ops;
+static const struct dvb_frontend_ops s5h1409_ops;
struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
struct i2c_adapter *i2c)
@@ -995,7 +995,7 @@ error:
}
EXPORT_SYMBOL(s5h1409_attach);
-static struct dvb_frontend_ops s5h1409_ops = {
+static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1409 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 90f86e82b087..f29750a96196 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -350,8 +350,8 @@ static int s5h1411_writereg(struct s5h1411_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
- "ret == %i)\n", __func__, addr, reg, data, ret);
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+ __func__, addr, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -864,7 +864,7 @@ static void s5h1411_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1411_ops;
+static const struct dvb_frontend_ops s5h1411_ops;
struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
struct i2c_adapter *i2c)
@@ -914,7 +914,7 @@ error:
}
EXPORT_SYMBOL(s5h1411_attach);
-static struct dvb_frontend_ops s5h1411_ops = {
+static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1411 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index d7d0b7d57ad7..f9a18fe94d88 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -880,7 +880,7 @@ struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe)
}
EXPORT_SYMBOL(s5h1420_get_tuner_i2c_adapter);
-static struct dvb_frontend_ops s5h1420_ops;
+static const struct dvb_frontend_ops s5h1420_ops;
struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
struct i2c_adapter *i2c)
@@ -934,7 +934,7 @@ error:
}
EXPORT_SYMBOL(s5h1420_attach);
-static struct dvb_frontend_ops s5h1420_ops = {
+static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 4215652f8eb7..a32fd9bc51a9 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -63,8 +63,8 @@ static int s5h1432_writereg(struct s5h1432_state *state,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
- "ret == %i)\n", __func__, addr, reg, data, ret);
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+ __func__, addr, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -341,7 +341,7 @@ static void s5h1432_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s5h1432_ops;
+static const struct dvb_frontend_ops s5h1432_ops;
struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
struct i2c_adapter *i2c)
@@ -370,7 +370,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
}
EXPORT_SYMBOL(s5h1432_attach);
-static struct dvb_frontend_ops s5h1432_ops = {
+static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Samsung s5h1432 DVB-T Frontend",
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index b5e3d90eba5e..274544a3ae0e 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -214,8 +214,8 @@ static int s921_i2c_writereg(struct s921_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
- " data == 0x%02x)\n", __func__, rc, reg, data);
+ printk("%s: writereg rcor(rc == %i, reg == 0x%02x, data == 0x%02x)\n",
+ __func__, rc, reg, data);
return rc;
}
@@ -477,7 +477,7 @@ static void s921_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops s921_ops;
+static const struct dvb_frontend_ops s921_ops;
struct dvb_frontend *s921_attach(const struct s921_config *config,
struct i2c_adapter *i2c)
@@ -505,7 +505,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
}
EXPORT_SYMBOL(s921_attach);
-static struct dvb_frontend_ops s921_ops = {
+static const struct dvb_frontend_ops s921_ops = {
.delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 78669ea68c61..528b82a5dd46 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -978,7 +978,7 @@ static int si2165_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_frontend_ops si2165_ops = {
+static const struct dvb_frontend_ops si2165_ops = {
.info = {
.name = "Silicon Labs ",
/* For DVB-C */
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 62ad7a7be9f8..4e8c3ac4303f 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -245,8 +245,8 @@ static int si21_writeregs(struct si21xx_state *state, u8 reg1,
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, "
- "ret == %i)\n", __func__, reg1, data[0], ret);
+ dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, ret == %i)\n",
+ __func__, reg1, data[0], ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -265,8 +265,8 @@ static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -866,7 +866,7 @@ static void si21xx_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops si21xx_ops = {
+static const struct dvb_frontend_ops si21xx_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "SL SI21XX DVB-S",
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index e87ac30d7fb8..04454cb78467 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -551,7 +551,7 @@ static void sp8870_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops sp8870_ops;
+static const struct dvb_frontend_ops sp8870_ops;
struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct i2c_adapter* i2c)
@@ -580,7 +580,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops sp8870_ops = {
+static const struct dvb_frontend_ops sp8870_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 4378fe1b978e..7c511c3cd4ca 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -63,8 +63,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
if (!(reg == 0xf1a && data == 0x000 &&
(ret == -EREMOTEIO || ret == -EFAULT)))
{
- printk("%s: writereg error "
- "(reg %03x, data %03x, ret == %i)\n",
+ printk("%s: writereg error (reg %03x, data %03x, ret == %i)\n",
__func__, reg & 0xffff, data & 0xffff, ret);
return ret;
}
@@ -562,7 +561,7 @@ static void sp887x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops sp887x_ops;
+static const struct dvb_frontend_ops sp887x_ops;
struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct i2c_adapter* i2c)
@@ -591,7 +590,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops sp887x_ops = {
+static const struct dvb_frontend_ops sp887x_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP887x DVB-T",
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3d171b0e00c2..02347598277a 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -485,15 +485,8 @@ int stb0899_read_regs(struct stb0899_state *state, unsigned int reg, u8 *buf, u3
(((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
_stb0899_read_reg(state, (reg | 0x00ff));
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++) {
- printk(" %02x", buf[i]);
- }
- printk("\n");
- }
+ dprintk(state->verbose, FE_DEBUGREG, 1,
+ "%s [0x%04x]: %*ph", __func__, reg, count, buf);
return 0;
err:
@@ -522,14 +515,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++)
- printk(" %02x", data[i]);
- printk("\n");
- }
+ dprintk(state->verbose, FE_DEBUGREG, 1,
+ "%s [0x%04x]: %*ph", __func__, reg, count, data);
ret = i2c_transfer(state->i2c, &i2c_msg, 1);
/*
@@ -614,13 +601,19 @@ static int stb0899_postproc(struct stb0899_state *state, u8 ctl, int enable)
return 0;
}
-static void stb0899_release(struct dvb_frontend *fe)
+static void stb0899_detach(struct dvb_frontend *fe)
{
struct stb0899_state *state = fe->demodulator_priv;
- dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
/* post process event */
stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
+}
+
+static void stb0899_release(struct dvb_frontend *fe)
+{
+ struct stb0899_state *state = fe->demodulator_priv;
+
+ dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
kfree(state);
}
@@ -1586,7 +1579,7 @@ static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static struct dvb_frontend_ops stb0899_ops = {
+static const struct dvb_frontend_ops stb0899_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STB0899 Multistandard",
@@ -1603,6 +1596,7 @@ static struct dvb_frontend_ops stb0899_ops = {
FE_CAN_QPSK
},
+ .detach = stb0899_detach,
.release = stb0899_release,
.init = stb0899_init,
.sleep = stb0899_sleep,
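The stb0899 register-dump hunks (and the matching stv090x hunk further down) drop the hand-rolled hex loops in favour of the printk %*ph extension, which consumes an int length plus a buffer pointer and prints up to 64 bytes as space-separated hex. The same file also splits the old release handling: the GPIO power-down moves into the new .detach callback while .release now only frees the state. A minimal module-style sketch of the %*ph specifier; nothing here is part of the driver:

#include <linux/module.h>
#include <linux/kernel.h>

static int __init phdemo_init(void)
{
        u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };

        /* prints: phdemo: regs [0xf1a0]: 12 34 56 78 */
        pr_info("phdemo: regs [0x%04x]: %*ph\n", 0xf1a0, (int)sizeof(buf), buf);
        return 0;
}

static void __exit phdemo_exit(void)
{
}

module_init(phdemo_init);
module_exit(phdemo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("demo of the %*ph hex-dump specifier");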
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 73347d51f340..69c03892f2da 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -41,11 +41,10 @@ struct stb6000_priv {
u32 frequency;
};
-static int stb6000_release(struct dvb_frontend *fe)
+static void stb6000_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int stb6000_sleep(struct dvb_frontend *fe)
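This stb6000 hunk, and the matching ones below for stb6100, stv6110, stv6110x, tda18271c2dd, tda665x, tda8261, tda826x, ts2020 and tua6100, track a dvb_frontend API change in the same series: the tuner and frontend .release callbacks now return void, apparently because the old status code was never consumed. A fragment of the resulting callback shape under that assumption; the names are placeholders:

#include <linux/slab.h>
#include "dvb_frontend.h"

/* tuner .release returns void: just free the private state */
static void demo_tuner_release(struct dvb_frontend *fe)
{
        kfree(fe->tuner_priv);
        fe->tuner_priv = NULL;
}

static const struct dvb_tuner_ops demo_tuner_ops = {
        .release = demo_tuner_release,
        /* .init, .sleep, .set_params, ... omitted */
};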
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 5add1182c3ca..17a955d0031b 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -61,7 +61,7 @@ struct stb6100_lkup {
u8 reg;
};
-static int stb6100_release(struct dvb_frontend *fe);
+static void stb6100_release(struct dvb_frontend *fe);
static const struct stb6100_lkup lkup[] = {
{ 0, 950000, 0x0a },
@@ -560,14 +560,12 @@ struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
return fe;
}
-static int stb6100_release(struct dvb_frontend *fe)
+static void stb6100_release(struct dvb_frontend *fe)
{
struct stb6100_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
EXPORT_SYMBOL(stb6100_attach);
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index c93d9a45f7f7..45cbc898ad25 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -74,8 +74,8 @@ static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -465,10 +465,9 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
dprintk("%s : FE_SET_FRONTEND\n", __func__);
if (c->delivery_system != SYS_DVBS) {
- dprintk("%s: unsupported delivery "
- "system selected (%d)\n",
- __func__, c->delivery_system);
- return -EOPNOTSUPP;
+ dprintk("%s: unsupported delivery system selected (%d)\n",
+ __func__, c->delivery_system);
+ return -EOPNOTSUPP;
}
if (state->config->set_ts_params)
@@ -536,7 +535,7 @@ static void stv0288_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0288_ops = {
+static const struct dvb_frontend_ops stv0288_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0288 DVB-S",
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 81b27b7c0c96..db94d4d109f9 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -57,8 +57,8 @@ static int stv0297_writereg(struct stv0297_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
@@ -658,7 +658,7 @@ static void stv0297_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0297_ops;
+static const struct dvb_frontend_ops stv0297_ops;
struct dvb_frontend *stv0297_attach(const struct stv0297_config *config,
struct i2c_adapter *i2c)
@@ -690,7 +690,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops stv0297_ops = {
+static const struct dvb_frontend_ops stv0297_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 7927fa925f2f..b36b21a13201 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -88,8 +88,8 @@ static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
- dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
- "ret == %i)\n", __func__, reg, data, ret);
+ dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -673,7 +673,7 @@ static void stv0299_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0299_ops;
+static const struct dvb_frontend_ops stv0299_ops;
struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
struct i2c_adapter* i2c)
@@ -713,7 +713,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops stv0299_ops = {
+static const struct dvb_frontend_ops stv0299_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
@@ -761,8 +761,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
-MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, "
- "Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
+MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index abc379aea713..4ac1ce2831ba 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -2272,7 +2272,7 @@ static void stv0367_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops stv0367ter_ops = {
+static const struct dvb_frontend_ops stv0367ter_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
@@ -3390,7 +3390,7 @@ static int stv0367cab_read_ucblcks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
};
-static struct dvb_frontend_ops stv0367cab_ops = {
+static const struct dvb_frontend_ops stv0367cab_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index f667005a6661..43a0f69b4b14 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1875,7 +1875,7 @@ static int stv0900_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops stv0900_ops = {
+static const struct dvb_frontend_ops stv0900_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV0900 frontend",
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index fa63a9e929ce..bded82774f4b 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1485,8 +1485,7 @@ static u32 stv0900_search_srate_coarse(struct dvb_frontend *fe)
current_step++;
direction *= -1;
- dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started."
- " tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
+ dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started. tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
tuner_freq, agc2_integr, coarse_srate, timingcpt);
if ((timingcpt >= 5) &&
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 25bdf6e0f963..7ef469c0c866 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -739,14 +739,8 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
buf[1] = reg & 0xff;
memcpy(&buf[2], data, count);
- if (unlikely(*state->verbose >= FE_DEBUGREG)) {
- int i;
-
- printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
- for (i = 0; i < count; i++)
- printk(" %02x", data[i]);
- printk("\n");
- }
+ dprintk(FE_DEBUGREG, 1, "%s [0x%04x]: %*ph",
+ __func__, reg, count, data);
ret = i2c_transfer(state->i2c, &i2c_msg, 1);
if (ret != 1) {
@@ -3698,9 +3692,12 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
}
val /= 16;
last = ARRAY_SIZE(stv090x_s2cn_tab) - 1;
- div = stv090x_s2cn_tab[0].read -
- stv090x_s2cn_tab[last].read;
- *cnr = 0xFFFF - ((val * 0xFFFF) / div);
+ div = stv090x_s2cn_tab[last].real -
+ stv090x_s2cn_tab[3].real;
+ val = stv090x_table_lookup(stv090x_s2cn_tab, last, val);
+ if (val < 0)
+ val = 0;
+ *cnr = val * 0xFFFF / div;
}
break;
@@ -3720,9 +3717,10 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
}
val /= 16;
last = ARRAY_SIZE(stv090x_s1cn_tab) - 1;
- div = stv090x_s1cn_tab[0].read -
- stv090x_s1cn_tab[last].read;
- *cnr = 0xFFFF - ((val * 0xFFFF) / div);
+ div = stv090x_s1cn_tab[last].real -
+ stv090x_s1cn_tab[0].real;
+ val = stv090x_table_lookup(stv090x_s1cn_tab, last, val);
+ *cnr = val * 0xFFFF / div;
}
break;
default:
@@ -4886,7 +4884,7 @@ static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
}
-static struct dvb_frontend_ops stv090x_ops = {
+static const struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 66a5a7f2295c..6a72d0be2ec5 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -59,11 +59,10 @@ static s32 abssub(s32 a, s32 b)
return b - a;
};
-static int stv6110_release(struct dvb_frontend *fe)
+static void stv6110_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index c611ad210b5c..66eba38f1014 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -335,14 +335,12 @@ static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
}
-static int stv6110x_release(struct dvb_frontend *fe)
+static void stv6110x_release(struct dvb_frontend *fe)
{
struct stv6110x_state *stv6110x = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(stv6110x);
-
- return 0;
}
static const struct dvb_tuner_ops stv6110x_ops = {
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 31cd32532387..4687e1546af2 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -656,7 +656,7 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
for (i = 0; i < num; i++)
if (msgs[i].flags & I2C_M_RD)
rd_num++;
- new_msgs = kmalloc(sizeof(*new_msgs) * (num + rd_num), GFP_KERNEL);
+ new_msgs = kmalloc_array(num + rd_num, sizeof(*new_msgs), GFP_KERNEL);
if (!new_msgs)
return -ENOMEM;
@@ -794,14 +794,13 @@ static int tc90522_probe(struct i2c_client *client,
i2c_set_adapdata(adap, state);
ret = i2c_add_adapter(adap);
if (ret < 0)
- goto err;
+ goto free_state;
cfg->tuner_i2c = state->cfg.tuner_i2c = adap;
i2c_set_clientdata(client, &state->cfg);
dev_info(&client->dev, "Toshiba TC90522 attached.\n");
return 0;
-
-err:
+free_state:
kfree(state);
return ret;
}
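Besides renaming the error label, the tc90522 hunk switches the message-array allocation to kmalloc_array(), which fails with NULL instead of silently wrapping when the element count multiplied by the element size would overflow. A userspace analogue of the check it adds:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* userspace stand-in for kmalloc_array(): refuse overflowing products */
static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;            /* n * size would overflow */
        return malloc(n * size);
}

int main(void)
{
        void *ok = alloc_array(16, sizeof(int));
        void *bad = alloc_array(SIZE_MAX / 2, sizeof(int));

        printf("ok=%p bad=%p\n", ok, bad);      /* bad is NULL */
        free(ok);
        return 0;
}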
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 806c56691ca5..32ba8401e743 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -77,8 +77,7 @@ static int _tda10021_writereg (struct tda10021_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
- printk("DVB: TDA10021(%d): %s, writereg error "
- "(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ printk("DVB: TDA10021(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
state->frontend.dvb->num, __func__, reg, data, ret);
msleep(10);
@@ -444,7 +443,7 @@ static void tda10021_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10021_ops;
+static const struct dvb_frontend_ops tda10021_ops;
struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
struct i2c_adapter* i2c,
@@ -484,7 +483,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda10021_ops = {
+static const struct dvb_frontend_ops tda10021_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10021 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 3b8c7e499d0d..8028007c68eb 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -72,8 +72,7 @@ static u8 tda10023_readreg (struct tda10023_state* state, u8 reg)
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2) {
int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
- printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error "
- "(reg == 0x%02x, ret == %i)\n",
+ printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error (reg == 0x%02x, ret == %i)\n",
num, __func__, reg, ret);
}
return b1[0];
@@ -88,8 +87,7 @@ static int tda10023_writereg (struct tda10023_state* state, u8 reg, u8 data)
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1) {
int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
- printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error "
- "(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
num, __func__, reg, data, ret);
}
return (ret != 1) ? -EREMOTEIO : 0;
@@ -516,7 +514,7 @@ static void tda10023_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10023_ops;
+static const struct dvb_frontend_ops tda10023_ops;
struct dvb_frontend *tda10023_attach(const struct tda10023_config *config,
struct i2c_adapter *i2c,
@@ -573,7 +571,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda10023_ops = {
+static const struct dvb_frontend_ops tda10023_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10023 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index c2bf89d0b0b0..92ab34c3e0be 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1063,38 +1063,34 @@ static void tda10048_establish_defaults(struct dvb_frontend *fe)
/* Validate/default the config */
if (config->dtv6_if_freq_khz == 0) {
config->dtv6_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv6_if_freq_khz);
}
if (config->dtv7_if_freq_khz == 0) {
config->dtv7_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv7_if_freq_khz);
}
if (config->dtv8_if_freq_khz == 0) {
config->dtv8_if_freq_khz = TDA10048_IF_4300;
- printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz is not set (defaulting to %d)\n",
__func__,
config->dtv8_if_freq_khz);
}
if (config->clk_freq_khz == 0) {
config->clk_freq_khz = TDA10048_CLK_16000;
- printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz "
- "is not set (defaulting to %d)\n",
+ printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz is not set (defaulting to %d)\n",
__func__,
config->clk_freq_khz);
}
}
-static struct dvb_frontend_ops tda10048_ops;
+static const struct dvb_frontend_ops tda10048_ops;
struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
struct i2c_adapter *i2c)
@@ -1156,7 +1152,7 @@ error:
}
EXPORT_SYMBOL(tda10048_attach);
-static struct dvb_frontend_ops tda10048_ops = {
+static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NXP TDA10048HN DVB-T",
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index b89848313fb9..e674508c349c 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1245,7 +1245,7 @@ static void tda1004x_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10045_ops = {
+static const struct dvb_frontend_ops tda10045_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10045H DVB-T",
@@ -1315,7 +1315,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
return &state->frontend;
}
-static struct dvb_frontend_ops tda10046_ops = {
+static const struct dvb_frontend_ops tda10046_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10046H DVB-T",
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 37ebeef2bbd0..a59f4fd09df6 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,7 +20,7 @@
#include "tda10071_priv.h"
-static struct dvb_frontend_ops tda10071_ops;
+static const struct dvb_frontend_ops tda10071_ops;
/*
* XXX: regmap_update_bits() does not fit our needs as it does not support
@@ -1102,7 +1102,7 @@ static int tda10071_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_frontend_ops tda10071_ops = {
+static const struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 31d0acb54fe8..b6d16c05904d 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -706,7 +706,7 @@ static void tda10086_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda10086_ops = {
+static const struct dvb_frontend_ops tda10086_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA10086 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index bc247f9b553a..6859fa5d5a85 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1126,11 +1126,10 @@ static int init(struct dvb_frontend *fe)
return 0;
}
-static int release(struct dvb_frontend *fe)
+static void release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 7ca965987f40..a63dec44295b 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -197,13 +197,12 @@ static int tda665x_set_params(struct dvb_frontend *fe)
return 0;
}
-static int tda665x_release(struct dvb_frontend *fe)
+static void tda665x_release(struct dvb_frontend *fe)
{
struct tda665x_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops tda665x_ops = {
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 9072d6463094..aa3200d3c352 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -421,7 +421,7 @@ static void tda8083_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops tda8083_ops;
+static const struct dvb_frontend_ops tda8083_ops;
struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct i2c_adapter* i2c)
@@ -449,7 +449,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops tda8083_ops = {
+static const struct dvb_frontend_ops tda8083_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA8083 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index e0df93191b9e..4eb294f330bc 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -152,13 +152,12 @@ static int tda8261_set_params(struct dvb_frontend *fe)
return 0;
}
-static int tda8261_release(struct dvb_frontend *fe)
+static void tda8261_release(struct dvb_frontend *fe)
{
struct tda8261_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
static const struct dvb_tuner_ops tda8261_ops = {
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index 2ec671df1441..da427b4c2aaa 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -41,11 +41,10 @@ struct tda826x_priv {
u32 frequency;
};
-static int tda826x_release(struct dvb_frontend *fe)
+static void tda826x_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tda826x_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index a9f6bbea6df3..931e5c98da8a 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -56,7 +56,7 @@ struct ts2020_reg_val {
static void ts2020_stat_work(struct work_struct *work);
-static int ts2020_release(struct dvb_frontend *fe)
+static void ts2020_release(struct dvb_frontend *fe)
{
struct ts2020_priv *priv = fe->tuner_priv;
struct i2c_client *client = priv->client;
@@ -64,7 +64,6 @@ static int ts2020_release(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
i2c_unregister_device(client);
- return 0;
}
static int ts2020_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 6da12b9e55eb..05ee16d29851 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -42,11 +42,10 @@ struct tua6100_priv {
u32 frequency;
};
-static int tua6100_release(struct dvb_frontend *fe)
+static void tua6100_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tua6100_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index b09fe88c40f8..178363704bd4 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -65,8 +65,8 @@ static int ves1820_writereg(struct ves1820_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
- printk("ves1820: %s(): writereg error (reg == 0x%02x, "
- "val == 0x%02x, ret == %i)\n", __func__, reg, data, ret);
+ printk("ves1820: %s(): writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+ __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
@@ -84,8 +84,8 @@ static u8 ves1820_readreg(struct ves1820_state *state, u8 reg)
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
- printk("ves1820: %s(): readreg error (reg == 0x%02x, "
- "ret == %i)\n", __func__, reg, ret);
+ printk("ves1820: %s(): readreg error (reg == 0x%02x, ret == %i)\n",
+ __func__, reg, ret);
return b1[0];
}
@@ -369,7 +369,7 @@ static void ves1820_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops ves1820_ops;
+static const struct dvb_frontend_ops ves1820_ops;
struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct i2c_adapter* i2c,
@@ -408,7 +408,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops ves1820_ops = {
+static const struct dvb_frontend_ops ves1820_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "VLSI VES1820 DVB-C",
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index ed113e216e14..d0ee52f66a8e 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -454,7 +454,7 @@ static int ves1x93_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
}
}
-static struct dvb_frontend_ops ves1x93_ops;
+static const struct dvb_frontend_ops ves1x93_ops;
struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
struct i2c_adapter* i2c)
@@ -512,7 +512,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops ves1x93_ops = {
+static const struct dvb_frontend_ops ves1x93_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "VLSI VES1x93 DVB-S",
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 7ed81315965f..a6d020fe9b8b 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -85,8 +85,8 @@ static int zl10036_read_status_reg(struct zl10036_state *state)
deb_i2c("R(status): %02x [FL=%d]\n", status,
(status & STATUS_FL) ? 1 : 0);
if (status & STATUS_POR)
- deb_info("%s: Power-On-Reset bit enabled - "
- "need to initialize the tuner\n", __func__);
+ deb_info("%s: Power-On-Reset bit enabled - need to initialize the tuner\n",
+ __func__);
return status;
}
@@ -134,14 +134,12 @@ static int zl10036_write(struct zl10036_state *state, u8 buf[], u8 count)
return 0;
}
-static int zl10036_release(struct dvb_frontend *fe)
+static void zl10036_release(struct dvb_frontend *fe)
{
struct zl10036_state *state = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
static int zl10036_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index f8c271be196c..60a2954f8ff8 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -152,8 +152,7 @@ static int zl10039_init(struct dvb_frontend *fe)
/* Reset logic */
ret = zl10039_writereg(state, GENERAL, 0x40);
if (ret < 0) {
- dprintk("Note: i2c write error normal when resetting the "
- "tuner\n");
+ dprintk("Note: i2c write error normal when resetting the tuner\n");
}
/* Wake up */
ret = zl10039_writereg(state, GENERAL, 0x01);
@@ -245,14 +244,13 @@ error:
return ret;
}
-static int zl10039_release(struct dvb_frontend *fe)
+static void zl10039_release(struct dvb_frontend *fe)
{
struct zl10039_state *state = fe->tuner_priv;
dprintk("%s\n", __func__);
kfree(state);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops zl10039_ops = {
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 3b08176d7bec..4f3ff3e853ac 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -602,7 +602,7 @@ static void zl10353_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops zl10353_ops;
+static const struct dvb_frontend_ops zl10353_ops;
struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
struct i2c_adapter *i2c)
@@ -634,7 +634,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops zl10353_ops = {
+static const struct dvb_frontend_ops zl10353_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink ZL10353 DVB-T",
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 251a556112a9..5bde6c209cd7 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1181,8 +1181,8 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
if (es_info_length > 0) {
pmt_cmd_id = msg[read_pos++];
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
- dev_err(fdtv->device, "invalid pmt_cmd_id %d "
- "at stream level\n", pmt_cmd_id);
+ dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
+ pmt_cmd_id);
if (es_info_length > sizeof(c->operand) - 4 -
write_pos) {
diff --git a/drivers/media/firewire/firedtv-rc.c b/drivers/media/firewire/firedtv-rc.c
index f82d4a93feb3..04dea2aac583 100644
--- a/drivers/media/firewire/firedtv-rc.c
+++ b/drivers/media/firewire/firedtv-rc.c
@@ -184,8 +184,9 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
else if (code >= 0x4540 && code <= 0x4542)
code = oldtable[code - 0x4521];
else {
- printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
- "from remote control\n", code);
+ dev_dbg(fdtv->device,
+ "invalid key code 0x%04x from remote control\n",
+ code);
return;
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 2669b4bad910..b31fa6fae009 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -221,7 +221,7 @@ config VIDEO_ADV7604
config VIDEO_ADV7604_CEC
bool "Enable Analog Devices ADV7604 CEC support"
- depends on VIDEO_ADV7604 && MEDIA_CEC
+ depends on VIDEO_ADV7604 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7604 will support the optional
HDMI CEC feature.
@@ -242,7 +242,7 @@ config VIDEO_ADV7842
config VIDEO_ADV7842_CEC
bool "Enable Analog Devices ADV7842 CEC support"
- depends on VIDEO_ADV7842 && MEDIA_CEC
+ depends on VIDEO_ADV7842 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7842 will support the optional
HDMI CEC feature.
@@ -481,7 +481,7 @@ config VIDEO_ADV7511
config VIDEO_ADV7511_CEC
bool "Enable Analog Devices ADV7511 CEC support"
- depends on VIDEO_ADV7511 && MEDIA_CEC
+ depends on VIDEO_ADV7511 && MEDIA_CEC_SUPPORT
---help---
When selected the adv7511 will support the optional
HDMI CEC feature.
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index beab2f381b81..a9026a91855e 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -65,16 +65,17 @@ static int ad5820_write(struct ad5820_device *coil, u16 data)
{
struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
struct i2c_msg msg;
+ __be16 be_data;
int r;
if (!client->adapter)
return -ENODEV;
- data = cpu_to_be16(data);
+ be_data = cpu_to_be16(data);
msg.addr = client->addr;
msg.flags = 0;
msg.len = 2;
- msg.buf = (u8 *)&data;
+ msg.buf = (u8 *)&be_data;
r = i2c_transfer(client->adapter, &msg, 1);
if (r < 0) {
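The ad5820 hunk above fixes a type-confusion bug: cpu_to_be16() was being stored back into the CPU-order u16 parameter, and that same variable was then used as the I2C buffer. Keeping the swapped value in a separate __be16 keeps the types honest and quiets sparse. A self-contained sketch of the corrected shape, with an invented foo_write_word() name:
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/types.h>
#include <asm/byteorder.h>
/* write one 16-bit value MSB first, as the fixed ad5820 code does */
static int foo_write_word(struct i2c_client *client, u16 data)
{
	__be16 be_data = cpu_to_be16(data);	/* swapped copy, original untouched */
	struct i2c_msg msg = {
		.addr  = client->addr,
		.flags = 0,
		.len   = sizeof(be_data),
		.buf   = (u8 *)&be_data,
	};
	int r;
	if (!client->adapter)
		return -ENODEV;
	r = i2c_transfer(client->adapter, &msg, 1);
	return r < 0 ? r : 0;
}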
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 05f1dc6c72af..fc9ec0f3679c 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -32,7 +32,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index f554809a51e7..72139bdae1ca 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -28,7 +28,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 5ba0f21bcfe4..8c9e28949ab1 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1732,9 +1732,10 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
static int adv7511_registered(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -1928,7 +1929,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
state, dev_name(&client->dev), CEC_CAP_TRANSMIT |
CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
- ADV7511_MAX_ADDRS, &client->dev);
+ ADV7511_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err) {
destroy_workqueue(state->work_queue);
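The adv7511/adv7604/adv7842 hunks move the parent device from CEC adapter allocation to registration: cec_allocate_adapter() loses its final struct device argument and cec_register_adapter() gains one. A hedged sketch of the resulting call sequence, assuming the usual <media/cec.h> header; the ops table, priv pointer, capability mask and foo_* names are placeholders only:
#include <linux/err.h>
#include <linux/i2c.h>
#include <media/cec.h>
static const struct cec_adap_ops foo_cec_adap_ops;	/* placeholder ops */
static int foo_cec_setup(struct i2c_client *client, void *priv)
{
	struct cec_adapter *adap;
	int err;
	/* no parent device here any more */
	adap = cec_allocate_adapter(&foo_cec_adap_ops, priv,
				    dev_name(&client->dev),
				    CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS,
				    1 /* available logical addresses */);
	err = PTR_ERR_OR_ZERO(adap);
	if (err)
		return err;
	/* ...the parent device is passed at registration time instead */
	err = cec_register_adapter(adap, &client->dev);
	if (err)
		cec_delete_adapter(adap);
	return err;
}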
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 4003831de712..d0375cac6a05 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1566,10 +1566,24 @@ static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
if (is_digital_input(sd)) {
+ bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+ u8 vic = 0;
+ u32 w, h;
+
+ w = hdmi_read16(sd, 0x07, info->linewidth_mask);
+ h = hdmi_read16(sd, 0x09, info->field0_height_mask);
+
+ if (hdmi_signal && (io_read(sd, 0x60) & 1))
+ vic = infoframe_read(sd, 0x04);
+
+ if (vic && v4l2_find_dv_timings_cea861_vic(timings, vic) &&
+ bt->width == w && bt->height == h)
+ goto found;
+
timings->type = V4L2_DV_BT_656_1120;
- bt->width = hdmi_read16(sd, 0x07, info->linewidth_mask);
- bt->height = hdmi_read16(sd, 0x09, info->field0_height_mask);
+ bt->width = w;
+ bt->height = h;
bt->pixelclock = info->read_hdmi_pixelclock(sd);
bt->hfrontporch = hdmi_read16(sd, 0x20, info->hfrontporch_mask);
bt->hsync = hdmi_read16(sd, 0x22, info->hsync_mask);
@@ -2617,9 +2631,10 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
static int adv76xx_registered(struct v4l2_subdev *sd)
{
struct adv76xx_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -3074,13 +3089,13 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return ret;
}
- if (!of_property_read_u32(endpoint, "default-input", &v))
+ of_node_put(endpoint);
+
+ if (!of_property_read_u32(np, "default-input", &v))
state->pdata.default_input = v;
else
state->pdata.default_input = -1;
- of_node_put(endpoint);
-
flags = bus_cfg.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
@@ -3497,8 +3512,7 @@ static int adv76xx_probe(struct i2c_client *client,
state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
state, dev_name(&client->dev),
CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS,
- &client->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 8c2a52e280af..2d61f0cc2b5b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3250,9 +3250,10 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd,
static int adv7842_registered(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int err;
- err = cec_register_adapter(state->cec_adap);
+ err = cec_register_adapter(state->cec_adap, &client->dev);
if (err)
cec_delete_adapter(state->cec_adap);
return err;
@@ -3568,8 +3569,7 @@ static int adv7842_probe(struct i2c_client *client,
state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops,
state, dev_name(&client->dev),
CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS,
- &client->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 48176591a80d..54c627859c8e 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -32,7 +32,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index bbec70c882a3..0d3f46af2545 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -32,7 +32,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index e4b3cf49dd38..59c1a98c5a90 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 142ae28803bb..0dcf450052ac 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -873,10 +873,7 @@ void cx25840_std_setup(struct i2c_client *client)
"Chroma sub-carrier freq = %d.%06d MHz\n",
fsc / 1000000, fsc % 1000000);
- v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, "
- "vblank %i, vactive %i, vblank656 %i, src_dec %i, "
- "burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
- "sc 0x%06x\n",
+ v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
hblank, hactive, vblank, vactive, vblank656,
src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
}
@@ -5169,11 +5166,9 @@ static int cx25840_probe(struct i2c_client *client,
id = CX2310X_AV;
} else if ((device_id & 0xff) == (device_id >> 8)) {
v4l_err(client,
- "likely a confused/unresponsive cx2388[578] A/V decoder"
- " found @ 0x%x (%s)\n",
+ "likely a confused/unresponsive cx2388[578] A/V decoder found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- v4l_err(client, "A method to reset it from the cx25840 driver"
- " software is not known at this time\n");
+ v4l_err(client, "A method to reset it from the cx25840 driver software is not known at this time\n");
return -ENODEV;
} else {
v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 4b782012cadc..15fbd9607cee 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -1113,8 +1113,8 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
j = 0;
break;
}
- v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
- "-%1d/+%1d, %u to %u Hz\n", i, j,
+ v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n",
+ i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
@@ -1124,8 +1124,7 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
- v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
- "%u ns\n",
+ v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index 81171d8e1c2c..89c28c36c5bf 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/i2c/m52790.h>
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 503b7c4f0a9b..201a9800ea52 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -146,11 +146,11 @@ int msp_reset(struct i2c_client *client)
},
};
- v4l_dbg(3, msp_debug, client, "msp_reset\n");
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_reset\n");
if (i2c_transfer(client->adapter, &reset[0], 1) != 1 ||
i2c_transfer(client->adapter, &reset[1], 1) != 1 ||
i2c_transfer(client->adapter, test, 2) != 2) {
- v4l_err(client, "chip reset failed\n");
+ dev_err(&client->dev, "chip reset failed\n");
return -1;
}
return 0;
@@ -182,17 +182,17 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
for (err = 0; err < 3; err++) {
if (i2c_transfer(client->adapter, msgs, 2) == 2)
break;
- v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
dev, addr);
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
- v4l_warn(client, "resetting chip, sound will go off.\n");
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
msp_reset(client);
return -1;
}
retval = read[0] << 8 | read[1];
- v4l_dbg(3, msp_debug, client, "msp_read(0x%x, 0x%x): 0x%x\n",
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_read(0x%x, 0x%x): 0x%x\n",
dev, addr, retval);
return retval;
}
@@ -218,17 +218,17 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
buffer[3] = val >> 8;
buffer[4] = val & 0xff;
- v4l_dbg(3, msp_debug, client, "msp_write(0x%x, 0x%x, 0x%x)\n",
+ dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_write(0x%x, 0x%x, 0x%x)\n",
dev, addr, val);
for (err = 0; err < 3; err++) {
if (i2c_master_send(client, buffer, 5) == 5)
break;
- v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
dev, addr);
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
- v4l_warn(client, "resetting chip, sound will go off.\n");
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
msp_reset(client);
return -1;
}
@@ -301,7 +301,7 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
} else
state->acb = 0xf60; /* Mute Input and SCART 1 Output */
- v4l_dbg(1, msp_debug, client, "scart switch: %s => %d (ACB=0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "scart switch: %s => %d (ACB=0x%04x)\n",
scart_names[in], out, state->acb);
msp_write_dsp(client, 0x13, state->acb);
@@ -359,7 +359,7 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
if (!reallymuted)
val = (val * 0x7f / 65535) << 8;
- v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "mute=%s scanning=%s volume=%d\n",
state->muted->val ? "on" : "off",
state->scan_in_progress ? "yes" : "no",
state->volume->val);
@@ -426,7 +426,7 @@ static int msp_s_radio(struct v4l2_subdev *sd)
if (state->radio)
return 0;
state->radio = 1;
- v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to radio mode\n");
state->watch_stereo = 0;
switch (state->opmode) {
case OPMODE_MANUAL:
@@ -461,7 +461,7 @@ static int msp_querystd(struct v4l2_subdev *sd, v4l2_std_id *id)
*id &= state->detected_std;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detected standard: %s(0x%08Lx)\n",
msp_standard_std_name(state->std), state->detected_std);
@@ -555,7 +555,7 @@ static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", freq);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "Setting I2S speed to %d\n", freq);
switch (freq) {
case 1024000:
@@ -579,7 +579,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
if (state->opmode == OPMODE_AUTOSELECT)
msp_detect_stereo(client);
- v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
+ dev_info(&client->dev, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
client->name, state->rev1, state->rev2);
snprintf(prefix, sizeof(prefix), "%s: Audio: ", sd->name);
v4l2_ctrl_handler_log_status(&state->hdl, prefix);
@@ -596,23 +596,23 @@ static int msp_log_status(struct v4l2_subdev *sd)
default: p = "unknown"; break;
}
if (state->mode == MSP_MODE_EXTERN) {
- v4l_info(client, "Mode: %s\n", p);
+ dev_info(&client->dev, "Mode: %s\n", p);
} else if (state->opmode == OPMODE_MANUAL) {
- v4l_info(client, "Mode: %s (%s%s)\n", p,
+ dev_info(&client->dev, "Mode: %s (%s%s)\n", p,
(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
} else {
if (state->opmode == OPMODE_AUTODETECT)
- v4l_info(client, "Mode: %s\n", p);
- v4l_info(client, "Standard: %s (%s%s)\n",
+ dev_info(&client->dev, "Mode: %s\n", p);
+ dev_info(&client->dev, "Standard: %s (%s%s)\n",
msp_standard_std_name(state->std),
(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
}
- v4l_info(client, "Audmode: 0x%04x\n", state->audmode);
- v4l_info(client, "Routing: 0x%08x (input) 0x%08x (output)\n",
+ dev_info(&client->dev, "Audmode: 0x%04x\n", state->audmode);
+ dev_info(&client->dev, "Routing: 0x%08x (input) 0x%08x (output)\n",
state->route_in, state->route_out);
- v4l_info(client, "ACB: 0x%04x\n", state->acb);
+ dev_info(&client->dev, "ACB: 0x%04x\n", state->acb);
return 0;
}
@@ -620,7 +620,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
static int msp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- v4l_dbg(1, msp_debug, client, "suspend\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "suspend\n");
msp_reset(client);
return 0;
}
@@ -628,7 +628,7 @@ static int msp_suspend(struct device *dev)
static int msp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- v4l_dbg(1, msp_debug, client, "resume\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "resume\n");
msp_wake_thread(client);
return 0;
}
@@ -670,6 +670,13 @@ static const struct v4l2_subdev_ops msp_ops = {
/* ----------------------------------------------------------------------- */
+
+static const char * const opmode_str[] = {
+ [OPMODE_MANUAL] = "manual",
+ [OPMODE_AUTODETECT] = "autodetect",
+ [OPMODE_AUTOSELECT] = "autodetect and autoselect",
+};
+
static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct msp_state *state;
@@ -689,7 +696,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
strlcpy(client->name, "msp3400", sizeof(client->name));
if (msp_reset(client) == -1) {
- v4l_dbg(1, msp_debug, client, "msp3400 not found\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 not found\n");
return -ENODEV;
}
@@ -724,10 +731,10 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
state->rev1 = msp_read_dsp(client, 0x1e);
if (state->rev1 != -1)
state->rev2 = msp_read_dsp(client, 0x1f);
- v4l_dbg(1, msp_debug, client, "rev1=0x%04x, rev2=0x%04x\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "rev1=0x%04x, rev2=0x%04x\n",
state->rev1, state->rev2);
if (state->rev1 == -1 || (state->rev1 == 0 && state->rev2 == 0)) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"not an msp3400 (cannot read chip version)\n");
return -ENODEV;
}
@@ -791,7 +798,8 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
msp_family == 3 && msp_revision == 'G' && msp_prod_hi == 3;
state->opmode = opmode;
- if (state->opmode == OPMODE_AUTO) {
+ if (state->opmode < OPMODE_MANUAL
+ || state->opmode > OPMODE_AUTOSELECT) {
/* MSP revision G and up have both autodetect and autoselect */
if (msp_revision >= 'G')
state->opmode = OPMODE_AUTOSELECT;
@@ -829,43 +837,35 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
v4l2_ctrl_cluster(2, &state->volume);
v4l2_ctrl_handler_setup(hdl);
- /* hello world :-) */
- v4l_info(client, "MSP%d4%02d%c-%c%d found @ 0x%x (%s)\n",
- msp_family, msp_product,
- msp_revision, msp_hard, msp_rom,
- client->addr << 1, client->adapter->name);
- v4l_info(client, "%s ", client->name);
- if (state->has_nicam && state->has_radio)
- printk(KERN_CONT "supports nicam and radio, ");
- else if (state->has_nicam)
- printk(KERN_CONT "supports nicam, ");
- else if (state->has_radio)
- printk(KERN_CONT "supports radio, ");
- printk(KERN_CONT "mode is ");
+ dev_info(&client->dev,
+ "MSP%d4%02d%c-%c%d found on %s: supports %s%s%s, mode is %s\n",
+ msp_family, msp_product,
+ msp_revision, msp_hard, msp_rom,
+ client->adapter->name,
+ (state->has_nicam) ? "nicam" : "",
+ (state->has_nicam && state->has_radio) ? " and " : "",
+ (state->has_radio) ? "radio" : "",
+ opmode_str[state->opmode]);
/* version-specific initialization */
switch (state->opmode) {
case OPMODE_MANUAL:
- printk(KERN_CONT "manual");
thread_func = msp3400c_thread;
break;
case OPMODE_AUTODETECT:
- printk(KERN_CONT "autodetect");
thread_func = msp3410d_thread;
break;
case OPMODE_AUTOSELECT:
- printk(KERN_CONT "autodetect and autoselect");
thread_func = msp34xxg_thread;
break;
}
- printk(KERN_CONT "\n");
/* startup control thread if needed */
if (thread_func) {
state->kthread = kthread_run(thread_func, client, "msp34xx");
if (IS_ERR(state->kthread))
- v4l_warn(client, "kernel_thread() failed\n");
+ dev_warn(&client->dev, "kernel_thread() failed\n");
msp_wake_thread(client);
}
return 0;
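Throughout msp3400-driver.c and msp3400-kthreads.c the v4l_dbg()/v4l_info()/v4l_warn()/v4l_err() wrappers are replaced by device-based logging: dev_dbg_lvl(&client->dev, level, msp_debug, ...) for the level-gated debug messages, and plain dev_info()/dev_warn()/dev_err() for the rest. A tiny usage illustration, assuming dev_dbg_lvl() keeps the (dev, level, debug-module-param, fmt, ...) argument order seen in these hunks and is provided by the V4L2 core headers; foo_debug and foo_log are invented names:
#include <linux/i2c.h>
#include <media/v4l2-common.h>	/* assumed home of dev_dbg_lvl() */
static int foo_debug;	/* illustrative debug module parameter, like msp_debug */
static void foo_log(struct i2c_client *client)
{
	/* presumably emitted only when foo_debug reaches the given level */
	dev_dbg_lvl(&client->dev, 1, foo_debug, "probing %s\n", client->name);
	/* unconditional messages use the ordinary dev_*() helpers */
	dev_info(&client->dev, "found on %s\n", client->adapter->name);
}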
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index 17120804fab7..eec7aa4c6f98 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -220,7 +220,7 @@ void msp3400c_set_mode(struct i2c_client *client, int mode)
int tuner = (state->route_in >> 3) & 1;
int i;
- v4l_dbg(1, msp_debug, client, "set_mode: %d\n", mode);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "set_mode: %d\n", mode);
state->mode = mode;
state->rxsubchans = V4L2_TUNER_SUB_MONO;
@@ -266,7 +266,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
/* this method would break everything, let's make sure
* it's never called
*/
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set_audmode called with mode=%d instead of set_source (ignored)\n",
state->audmode);
return;
@@ -295,7 +295,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
/* switch demodulator */
switch (state->mode) {
case MSP_MODE_FM_TERRA:
- v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "FM set_audmode: %s\n", modestr);
switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
msp_write_dsp(client, 0x000e, 0x3001);
@@ -309,7 +309,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
}
break;
case MSP_MODE_FM_SAT:
- v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "SAT set_audmode: %s\n", modestr);
switch (audmode) {
case V4L2_TUNER_MODE_MONO:
msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
@@ -329,31 +329,31 @@ static void msp3400c_set_audmode(struct i2c_client *client)
case MSP_MODE_FM_NICAM1:
case MSP_MODE_FM_NICAM2:
case MSP_MODE_AM_NICAM:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM set_audmode: %s\n", modestr);
if (state->nicam_on)
src = 0x0100; /* NICAM */
break;
case MSP_MODE_BTSC:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"BTSC set_audmode: %s\n", modestr);
break;
case MSP_MODE_EXTERN:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"extern set_audmode: %s\n", modestr);
src = 0x0200; /* SCART */
break;
case MSP_MODE_FM_RADIO:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"FM-Radio set_audmode: %s\n", modestr);
break;
default:
- v4l_dbg(1, msp_debug, client, "mono set_audmode\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "mono set_audmode\n");
return;
}
/* switch audio */
- v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode);
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "set audmode %d\n", audmode);
switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
@@ -361,7 +361,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
break;
case V4L2_TUNER_MODE_MONO:
if (state->mode == MSP_MODE_AM_NICAM) {
- v4l_dbg(1, msp_debug, client, "switching to AM mono\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to AM mono\n");
/* AM mono decoding is handled by tuner, not MSP chip */
/* SCART switching control register */
msp_set_scart(client, SCART_MONO, 0);
@@ -377,7 +377,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
src |= 0x0010;
break;
}
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set_audmode final source/matrix = 0x%x\n", src);
msp_set_source(client, src);
@@ -388,23 +388,23 @@ static void msp3400c_print_mode(struct i2c_client *client)
struct msp_state *state = to_state(i2c_get_clientdata(client));
if (state->main == state->second)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"mono sound carrier: %d.%03d MHz\n",
state->main / 910000, (state->main / 910) % 1000);
else
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"main sound carrier: %d.%03d MHz\n",
state->main / 910000, (state->main / 910) % 1000);
if (state->mode == MSP_MODE_FM_NICAM1 || state->mode == MSP_MODE_FM_NICAM2)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM/FM carrier : %d.%03d MHz\n",
state->second / 910000, (state->second/910) % 1000);
if (state->mode == MSP_MODE_AM_NICAM)
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"NICAM/AM carrier : %d.%03d MHz\n",
state->second / 910000, (state->second / 910) % 1000);
if (state->mode == MSP_MODE_FM_TERRA && state->main != state->second) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"FM-stereo carrier : %d.%03d MHz\n",
state->second / 910000, (state->second / 910) % 1000);
}
@@ -425,7 +425,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
val = msp_read_dsp(client, 0x18);
if (val > 32767)
val -= 65536;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"stereo detect register: %d\n", val);
if (val > 8192) {
rxsubchans = V4L2_TUNER_SUB_STEREO;
@@ -440,7 +440,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
case MSP_MODE_FM_NICAM2:
case MSP_MODE_AM_NICAM:
val = msp_read_dem(client, 0x23);
- v4l_dbg(2, msp_debug, client, "nicam sync=%d, mode=%d\n",
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "nicam sync=%d, mode=%d\n",
val & 1, (val & 0x1e) >> 1);
if (val & 1) {
@@ -471,14 +471,14 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
}
if (rxsubchans != state->rxsubchans) {
update = 1;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"watch: rxsubchans %02x => %02x\n",
state->rxsubchans, rxsubchans);
state->rxsubchans = rxsubchans;
}
if (newnicam != state->nicam_on) {
update = 1;
- v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "watch: nicam %d => %d\n",
state->nicam_on, newnicam);
state->nicam_on = newnicam;
}
@@ -508,23 +508,23 @@ int msp3400c_thread(void *data)
struct msp3400c_carrier_detect *cd;
int count, max1, max2, val1, val2, val, i;
- v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: wakeup\n");
restart:
- v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->radio || MSP_MODE_EXTERN == state->mode) {
/* no carrier scan, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -553,7 +553,7 @@ restart:
/* autodetect doesn't work well with AM ... */
max1 = 3;
count = 0;
- v4l_dbg(1, msp_debug, client, "AM sound override\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "AM sound override\n");
}
for (i = 0; i < count; i++) {
@@ -565,7 +565,7 @@ restart:
val -= 65536;
if (val1 < val)
val1 = val, max1 = i;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"carrier1 val: %5d / %s\n", val, cd[i].name);
}
@@ -602,7 +602,7 @@ restart:
val -= 65536;
if (val2 < val)
val2 = val, max2 = i;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"carrier2 val: %5d / %s\n", val, cd[i].name);
}
@@ -687,7 +687,7 @@ no_second:
watch_stereo(client);
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -698,23 +698,23 @@ int msp3410d_thread(void *data)
struct msp_state *state = to_state(i2c_get_clientdata(client));
int val, i, std, count;
- v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3410 daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: wakeup\n");
restart:
- v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->mode == MSP_MODE_EXTERN) {
/* no carrier scan needed, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -740,7 +740,7 @@ restart:
goto restart;
if (msp_debug)
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"setting standard: %s (0x%04x)\n",
msp_standard_std_name(std), std);
@@ -758,14 +758,14 @@ restart:
val = msp_read_dem(client, 0x7e);
if (val < 0x07ff)
break;
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detection still in progress\n");
}
}
for (i = 0; msp_stdlist[i].name != NULL; i++)
if (msp_stdlist[i].retval == val)
break;
- v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "current standard: %s (0x%04x)\n",
msp_standard_std_name(val), val);
state->main = msp_stdlist[i].main;
state->second = msp_stdlist[i].second;
@@ -775,8 +775,7 @@ restart:
if (msp_amsound && !state->radio &&
(state->v4l2_std & V4L2_STD_SECAM) && (val != 0x0009)) {
/* autodetection has failed, let backup */
- v4l_dbg(1, msp_debug, client, "autodetection failed,"
- " switching to backup standard: %s (0x%04x)\n",
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "autodetection failed, switching to backup standard: %s (0x%04x)\n",
msp_stdlist[8].name ?
msp_stdlist[8].name : "unknown", val);
state->std = val = 0x0009;
@@ -850,7 +849,7 @@ restart:
watch_stereo(client);
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -867,23 +866,23 @@ static int msp34xxg_modus(struct i2c_client *client)
struct msp_state *state = to_state(i2c_get_clientdata(client));
if (state->radio) {
- v4l_dbg(1, msp_debug, client, "selected radio modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected radio modus\n");
return 0x0001;
}
if (state->v4l2_std == V4L2_STD_NTSC_M_JP) {
- v4l_dbg(1, msp_debug, client, "selected M (EIA-J) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (EIA-J) modus\n");
return 0x4001;
}
if (state->v4l2_std == V4L2_STD_NTSC_M_KR) {
- v4l_dbg(1, msp_debug, client, "selected M (A2) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (A2) modus\n");
return 0x0001;
}
if (state->v4l2_std == V4L2_STD_SECAM_L) {
- v4l_dbg(1, msp_debug, client, "selected SECAM-L modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected SECAM-L modus\n");
return 0x6001;
}
if (state->v4l2_std & V4L2_STD_MN) {
- v4l_dbg(1, msp_debug, client, "selected M (BTSC) modus\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (BTSC) modus\n");
return 0x2001;
}
return 0x7001;
@@ -927,7 +926,7 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
else
source = (in << 8) | matrix;
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"set source to %d (0x%x) for output %02x\n", in, source, reg);
msp_write_dsp(client, reg, source);
}
@@ -996,23 +995,23 @@ int msp34xxg_thread(void *data)
struct msp_state *state = to_state(i2c_get_clientdata(client));
int val, i;
- v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "msp34xxg daemon started\n");
state->detected_std = V4L2_STD_ALL;
set_freezable();
for (;;) {
- v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: sleep\n");
msp_sleep(state, -1);
- v4l_dbg(2, msp_debug, client, "msp34xxg thread: wakeup\n");
+ dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: wakeup\n");
restart:
- v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: restart scan\n");
state->restart = 0;
if (kthread_should_stop())
break;
if (state->mode == MSP_MODE_EXTERN) {
/* no carrier scan needed, just unmute */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"thread: no carrier scan\n");
state->scan_in_progress = 0;
msp_update_volume(state);
@@ -1029,7 +1028,7 @@ restart:
goto unmute;
/* watch autodetect */
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"started autodetect, waiting for result\n");
for (i = 0; i < 10; i++) {
if (msp_sleep(state, 100))
@@ -1041,17 +1040,17 @@ restart:
state->std = val;
break;
}
- v4l_dbg(2, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 2, msp_debug,
"detection still in progress\n");
}
if (state->std == 1) {
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"detection still in progress after 10 tries. giving up.\n");
continue;
}
unmute:
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"detected standard: %s (0x%04x)\n",
msp_standard_std_name(state->std), state->std);
state->detected_std = msp_standard_std(state->std);
@@ -1084,7 +1083,7 @@ unmute:
goto restart;
}
}
- v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
return 0;
}
@@ -1111,7 +1110,7 @@ static int msp34xxg_detect_stereo(struct i2c_client *client)
state->rxsubchans =
V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
}
- v4l_dbg(1, msp_debug, client,
+ dev_dbg_lvl(&client->dev, 1, msp_debug,
"status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n",
status, is_stereo, is_bilingual, state->rxsubchans);
return (oldrx != state->rxsubchans);
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index 89e458c23983..00640233a5e3 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <media/i2c/saa6588.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 6f49886806ee..ad456ce051f9 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -31,7 +31,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index eecad2d1edce..119050e1197a 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -28,7 +28,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index e3348db56c46..771db56332b2 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -479,7 +479,8 @@ int smiapp_pll_calculate(struct device *dev,
return 0;
}
- dev_info(dev, "unable to compute pre_pll divisor\n");
+ dev_dbg(dev, "unable to compute pre_pll divisor\n");
+
return rval;
}
EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 44f8c7e10a35..59872b31f832 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -26,6 +26,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/smiapp.h>
@@ -68,10 +69,9 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
u32 fmt_model_type, fmt_model_subtype, ncol_desc, nrow_desc;
unsigned int i;
- int rval;
+ int pixel_count = 0;
int line_count = 0;
- int embedded_start = -1, embedded_end = -1;
- int image_start = 0;
+ int rval;
rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE,
&fmt_model_type);
@@ -101,12 +101,11 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
u32 pixels;
char *which;
char *what;
+ u32 reg;
if (fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i),
- &desc);
+ reg = SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i);
+ rval = smiapp_read(sensor, reg, &desc);
if (rval)
return rval;
@@ -117,10 +116,8 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
pixels = desc & SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK;
} else if (fmt_model_type
== SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i),
- &desc);
+ reg = SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i);
+ rval = smiapp_read(sensor, reg, &desc);
if (rval)
return rval;
@@ -159,40 +156,47 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
break;
default:
what = "invalid";
- dev_dbg(&client->dev, "pixelcode %d\n", pixelcode);
break;
}
- dev_dbg(&client->dev, "%s pixels: %d %s\n",
- what, pixels, which);
-
- if (i < ncol_desc)
+ dev_dbg(&client->dev,
+ "0x%8.8x %s pixels: %d %s (pixelcode %u)\n", reg,
+ what, pixels, which, pixelcode);
+
+ if (i < ncol_desc) {
+ if (pixelcode ==
+ SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE)
+ sensor->visible_pixel_start = pixel_count;
+ pixel_count += pixels;
continue;
+ }
/* Handle row descriptors */
- if (pixelcode
- == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED) {
- embedded_start = line_count;
- } else {
- if (pixelcode == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE
- || pixels >= sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES] / 2)
- image_start = line_count;
- if (embedded_start != -1 && embedded_end == -1)
- embedded_end = line_count;
+ switch (pixelcode) {
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED:
+ if (sensor->embedded_end)
+ break;
+ sensor->embedded_start = line_count;
+ sensor->embedded_end = line_count + pixels;
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE:
+ sensor->image_start = line_count;
+ break;
}
line_count += pixels;
}
- if (embedded_start == -1 || embedded_end == -1) {
- embedded_start = 0;
- embedded_end = 0;
+ if (sensor->embedded_end > sensor->image_start) {
+ dev_dbg(&client->dev,
+ "adjusting image start line to %u (was %u)\n",
+ sensor->embedded_end, sensor->image_start);
+ sensor->image_start = sensor->embedded_end;
}
- sensor->image_start = image_start;
-
dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
- embedded_start, embedded_end);
- dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
+ sensor->embedded_start, sensor->embedded_end);
+ dev_dbg(&client->dev, "image data starts at line %d\n",
+ sensor->image_start);
return 0;
}
@@ -443,8 +447,7 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
orient ^= sensor->hvflip_inv_mask;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_IMAGE_ORIENTATION,
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
orient);
if (rval < 0)
return rval;
@@ -459,10 +462,8 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
__smiapp_update_exposure_limits(sensor);
if (exposure > sensor->exposure->maximum) {
- sensor->exposure->val =
- sensor->exposure->maximum;
- rval = smiapp_set_ctrl(
- sensor->exposure);
+ sensor->exposure->val = sensor->exposure->maximum;
+ rval = smiapp_set_ctrl(sensor->exposure);
if (rval < 0)
return rval;
}
@@ -621,7 +622,7 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
static int smiapp_init_late_controls(struct smiapp_sensor *sensor)
{
unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
- sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
+ sensor->csi_format->compressed - sensor->compressed_min_bpp];
unsigned int max, i;
for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
@@ -754,6 +755,7 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_pll *pll = &sensor->pll;
+ u8 compressed_max_bpp = 0;
unsigned int type, n;
unsigned int i, pixel_order;
int rval;
@@ -826,16 +828,27 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
pll->scale_m = sensor->scale_m;
for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ sensor->compressed_min_bpp =
+ min(smiapp_csi_data_formats[i].compressed,
+ sensor->compressed_min_bpp);
+ compressed_max_bpp =
+ max(smiapp_csi_data_formats[i].compressed,
+ compressed_max_bpp);
+ }
+
+ sensor->valid_link_freqs = devm_kcalloc(
+ &client->dev,
+ compressed_max_bpp - sensor->compressed_min_bpp + 1,
+ sizeof(*sensor->valid_link_freqs), GFP_KERNEL);
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
const struct smiapp_csi_data_format *f =
&smiapp_csi_data_formats[i];
unsigned long *valid_link_freqs =
&sensor->valid_link_freqs[
- f->compressed - SMIAPP_COMPRESSED_BASE];
+ f->compressed - sensor->compressed_min_bpp];
unsigned int j;
- BUG_ON(f->compressed < SMIAPP_COMPRESSED_BASE);
- BUG_ON(f->compressed > SMIAPP_COMPRESSED_MAX);
-
if (!(sensor->default_mbus_frame_fmts & 1 << i))
continue;
@@ -914,12 +927,6 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
unsigned int binning_mode;
int rval;
- dev_dbg(&client->dev, "frame size: %dx%d\n",
- sensor->src->crop[SMIAPP_PAD_SRC].width,
- sensor->src->crop[SMIAPP_PAD_SRC].height);
- dev_dbg(&client->dev, "csi format width: %d\n",
- sensor->csi_format->width);
-
/* Binning has to be set up here; it affects limits */
if (sensor->binning_horizontal == 1 &&
sensor->binning_vertical == 1) {
@@ -1196,9 +1203,17 @@ out:
* Power management
*/
-static int smiapp_power_on(struct smiapp_sensor *sensor)
+static int smiapp_power_on(struct device *dev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ /*
+ * The sub-device related to the I2C device is always the
+ * source one, i.e. ssds[0].
+ */
+ struct smiapp_sensor *sensor =
+ container_of(ssd, struct smiapp_sensor, ssds[0]);
unsigned int sleep;
int rval;
@@ -1307,8 +1322,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
if (!sensor->pixel_array)
return 0;
- rval = v4l2_ctrl_handler_setup(
- &sensor->pixel_array->ctrl_handler);
+ rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
@@ -1325,16 +1339,24 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
return 0;
out_cci_addr_fail:
+
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
out_xclk_fail:
regulator_disable(sensor->vana);
+
return rval;
}
-static void smiapp_power_off(struct smiapp_sensor *sensor)
+static int smiapp_power_off(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct smiapp_sensor *sensor =
+ container_of(ssd, struct smiapp_sensor, ssds[0]);
+
/*
* Currently power/clock to lens are enable/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1352,31 +1374,31 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
usleep_range(5000, 5000);
regulator_disable(sensor->vana);
sensor->streaming = false;
+
+ return 0;
}
static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- int ret = 0;
+ int rval;
- mutex_lock(&sensor->power_mutex);
+ if (!on) {
+ pm_runtime_mark_last_busy(subdev->dev);
+ pm_runtime_put_autosuspend(subdev->dev);
- if (on && !sensor->power_count) {
- /* Power on and perform initialisation. */
- ret = smiapp_power_on(sensor);
- if (ret < 0)
- goto out;
- } else if (!on && sensor->power_count == 1) {
- smiapp_power_off(sensor);
+ return 0;
}
- /* Update the power count. */
- sensor->power_count += on ? 1 : -1;
- WARN_ON(sensor->power_count < 0);
+ rval = pm_runtime_get_sync(subdev->dev);
+ if (rval >= 0)
+ return 0;
-out:
- mutex_unlock(&sensor->power_mutex);
- return ret;
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(subdev->dev);
+
+ pm_runtime_put(subdev->dev);
+
+ return rval;
}
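The smiapp_set_power() rewrite above is the core of this runtime-PM conversion: instead of a mutex-protected power_count, s_power(0) becomes mark-last-busy plus an autosuspend put, and s_power(1) becomes pm_runtime_get_sync() with unwinding when the resume fails. A sketch of that wrapper shape in isolation, mirroring the error handling the hunk uses; foo_s_power is an invented name and the device pointer stands in for subdev->dev:
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>
static int foo_s_power(struct device *dev, int on)
{
	int ret;
	if (!on) {
		/* queue an autosuspend instead of powering off synchronously */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}
	ret = pm_runtime_get_sync(dev);
	if (ret >= 0)
		return 0;
	/* mirror the hunk's unwinding, then drop the reference get_sync() took */
	if (ret != -EBUSY && ret != -EAGAIN)
		pm_runtime_set_active(dev);
	pm_runtime_put(dev);
	return ret;
}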
/* -----------------------------------------------------------------------------
@@ -1614,7 +1636,8 @@ static int __smiapp_get_format(struct v4l2_subdev *subdev,
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(subdev, cfg,
+ fmt->pad);
} else {
struct v4l2_rect *r;
@@ -1714,7 +1737,6 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
static const struct smiapp_csi_data_format
*smiapp_validate_csi_data_format(struct smiapp_sensor *sensor, u32 code)
{
- const struct smiapp_csi_data_format *csi_format = sensor->csi_format;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
@@ -1723,7 +1745,7 @@ static const struct smiapp_csi_data_format
return &smiapp_csi_data_formats[i];
}
- return csi_format;
+ return sensor->csi_format;
}
static int smiapp_set_format_source(struct v4l2_subdev *subdev,
@@ -1769,7 +1791,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
valid_link_freqs =
&sensor->valid_link_freqs[sensor->csi_format->compressed
- - SMIAPP_COMPRESSED_BASE];
+ - sensor->compressed_min_bpp];
__v4l2_ctrl_modify_range(
sensor->link_freq, 0,
@@ -2057,8 +2079,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_set_compose_scaler(subdev, cfg, sel, crops, comp);
*comp = sel->r;
- smiapp_propagate(subdev, cfg, sel->which,
- V4L2_SEL_TGT_COMPOSE);
+ smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return smiapp_update_mode(sensor);
@@ -2135,9 +2156,8 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
->height;
src_size = &_r;
} else {
- src_size =
- v4l2_subdev_get_try_compose(
- subdev, cfg, ssd->sink_pad);
+ src_size = v4l2_subdev_get_try_compose(
+ subdev, cfg, ssd->sink_pad);
}
}
@@ -2161,6 +2181,15 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
return 0;
}
+static void smiapp_get_native_size(struct smiapp_subdev *ssd,
+ struct v4l2_rect *r)
+{
+ r->top = 0;
+ r->left = 0;
+ r->width = ssd->sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ r->height = ssd->sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+}
+
static int __smiapp_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
@@ -2192,17 +2221,12 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_NATIVE_SIZE:
- if (ssd == sensor->pixel_array) {
- sel->r.left = sel->r.top = 0;
- sel->r.width =
- sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- sel->r.height =
- sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- } else if (sel->pad == ssd->sink_pad) {
+ if (ssd == sensor->pixel_array)
+ smiapp_get_native_size(ssd, &sel->r);
+ else if (sel->pad == ssd->sink_pad)
sel->r = sink_fmt;
- } else {
+ else
sel->r = *comp;
- }
break;
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -2303,15 +2327,26 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
return -EBUSY;
if (!sensor->nvm_size) {
+ int rval;
+
/* NVM not read yet - read it now */
sensor->nvm_size = sensor->hwcfg->nvm_size;
- if (smiapp_set_power(subdev, 1) < 0)
+
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(&client->dev);
return -ENODEV;
+ }
+
if (smiapp_read_nvm(sensor, sensor->nvm)) {
dev_err(&client->dev, "nvm read failed\n");
return -ENODEV;
}
- smiapp_set_power(subdev, 0);
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
}
/*
* NVM is still way below a PAGE_SIZE, so we can safely
@@ -2475,383 +2510,160 @@ static const struct v4l2_subdev_ops smiapp_ops;
static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
static const struct media_entity_operations smiapp_entity_ops;
-static int smiapp_register_subdevs(struct smiapp_sensor *sensor)
+static int smiapp_register_subdev(struct smiapp_sensor *sensor,
+ struct smiapp_subdev *ssd,
+ struct smiapp_subdev *sink_ssd,
+ u16 source_pad, u16 sink_pad, u32 link_flags)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct smiapp_subdev *ssds[] = {
- sensor->scaler,
- sensor->binner,
- sensor->pixel_array,
- };
- unsigned int i;
int rval;
- for (i = 0; i < SMIAPP_SUBDEVS - 1; i++) {
- struct smiapp_subdev *this = ssds[i + 1];
- struct smiapp_subdev *last = ssds[i];
-
- if (!last)
- continue;
+ if (!sink_ssd)
+ return 0;
- rval = media_entity_pads_init(&this->sd.entity,
- this->npads, this->pads);
- if (rval) {
- dev_err(&client->dev,
- "media_entity_pads_init failed\n");
- return rval;
- }
+ rval = media_entity_pads_init(&ssd->sd.entity,
+ ssd->npads, ssd->pads);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_pads_init failed\n");
+ return rval;
+ }
- rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
- &this->sd);
- if (rval) {
- dev_err(&client->dev,
- "v4l2_device_register_subdev failed\n");
- return rval;
- }
+ rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+ &ssd->sd);
+ if (rval) {
+ dev_err(&client->dev,
+ "v4l2_device_register_subdev failed\n");
+ return rval;
+ }
- rval = media_create_pad_link(&this->sd.entity,
- this->source_pad,
- &last->sd.entity,
- last->sink_pad,
- MEDIA_LNK_FL_ENABLED |
- MEDIA_LNK_FL_IMMUTABLE);
- if (rval) {
- dev_err(&client->dev,
- "media_create_pad_link failed\n");
- return rval;
- }
+ rval = media_create_pad_link(&ssd->sd.entity, source_pad,
+ &sink_ssd->sd.entity, sink_pad,
+ link_flags);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_create_pad_link failed\n");
+ v4l2_device_unregister_subdev(&ssd->sd);
+ return rval;
}
return 0;
}
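
smiapp_register_subdev() above folds three steps into one helper: initialise the entity's pads, register the subdev on the parent v4l2_device, and create an immutable, enabled link to the downstream entity. A condensed sketch of that sequence with generic, made-up parameter names:

#include <media/media-entity.h>
#include <media/v4l2-device.h>

static int example_register_and_link(struct v4l2_device *v4l2_dev,
				     struct v4l2_subdev *src_sd,
				     struct media_pad *src_pads, u16 src_npads,
				     u16 src_pad,
				     struct v4l2_subdev *sink_sd, u16 sink_pad)
{
	int ret;

	ret = media_entity_pads_init(&src_sd->entity, src_npads, src_pads);
	if (ret)
		return ret;

	ret = v4l2_device_register_subdev(v4l2_dev, src_sd);
	if (ret)
		return ret;

	ret = media_create_pad_link(&src_sd->entity, src_pad,
				    &sink_sd->entity, sink_pad,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret)
		v4l2_device_unregister_subdev(src_sd);	/* undo on link failure */

	return ret;
}
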
-static void smiapp_cleanup(struct smiapp_sensor *sensor)
+static void smiapp_unregistered(struct v4l2_subdev *subdev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
-
- device_remove_file(&client->dev, &dev_attr_nvm);
- device_remove_file(&client->dev, &dev_attr_ident);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
- smiapp_free_controls(sensor);
+ for (i = 1; i < sensor->ssds_used; i++)
+ v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
}
-static int smiapp_init(struct smiapp_sensor *sensor)
+static int smiapp_registered(struct v4l2_subdev *subdev)
{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct smiapp_pll *pll = &sensor->pll;
- struct smiapp_subdev *last = NULL;
- unsigned int i;
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
- sensor->vana = devm_regulator_get(&client->dev, "vana");
- if (IS_ERR(sensor->vana)) {
- dev_err(&client->dev, "could not get regulator for vana\n");
- return PTR_ERR(sensor->vana);
- }
-
- sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock (%ld)\n",
- PTR_ERR(sensor->ext_clk));
- return -EPROBE_DEFER;
- }
-
- rval = clk_set_rate(sensor->ext_clk,
- sensor->hwcfg->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
- sensor->hwcfg->ext_clk);
- return rval;
+ if (sensor->scaler) {
+ rval = smiapp_register_subdev(
+ sensor, sensor->binner, sensor->scaler,
+ SMIAPP_PAD_SRC, SMIAPP_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (rval < 0)
+ return rval;
}
- sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
- GPIOD_OUT_LOW);
- if (IS_ERR(sensor->xshutdown))
- return PTR_ERR(sensor->xshutdown);
-
- rval = smiapp_power_on(sensor);
+ rval = smiapp_register_subdev(
+ sensor, sensor->pixel_array, sensor->binner,
+ SMIAPP_PA_PAD_SRC, SMIAPP_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
if (rval)
- return -ENODEV;
-
- rval = smiapp_identify_module(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_power_off;
- }
-
- rval = smiapp_get_all_limits(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_power_off;
- }
-
- /*
- * Handle Sensor Module orientation on the board.
- *
- * The application of H-FLIP and V-FLIP on the sensor is modified by
- * the sensor orientation on the board.
- *
- * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
- * both H-FLIP and V-FLIP for normal operation which also implies
- * that a set/unset operation for user space HFLIP and VFLIP v4l2
- * controls will need to be internally inverted.
- *
- * Rotation also changes the bayer pattern.
- */
- if (sensor->hwcfg->module_board_orient ==
- SMIAPP_MODULE_BOARD_ORIENT_180)
- sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
- SMIAPP_IMAGE_ORIENTATION_VFLIP;
-
- rval = smiapp_call_quirk(sensor, limits);
- if (rval) {
- dev_err(&client->dev, "limits quirks failed\n");
- goto out_power_off;
- }
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
- u32 val;
-
- rval = smiapp_read(sensor,
- SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
- sensor->nbinning_subtypes = min_t(u8, val,
- SMIAPP_BINNING_SUBTYPES);
-
- for (i = 0; i < sensor->nbinning_subtypes; i++) {
- rval = smiapp_read(
- sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
- sensor->binning_subtypes[i] =
- *(struct smiapp_binning_subtype *)&val;
+ goto out_err;
- dev_dbg(&client->dev, "binning %xx%x\n",
- sensor->binning_subtypes[i].horizontal,
- sensor->binning_subtypes[i].vertical);
- }
- }
- sensor->binning_horizontal = 1;
- sensor->binning_vertical = 1;
+ return 0;
- if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
- dev_err(&client->dev, "sysfs ident entry creation failed\n");
- rval = -ENOENT;
- goto out_power_off;
- }
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- dev_err(&client->dev, "nvm buf allocation failed\n");
- rval = -ENOMEM;
- goto out_cleanup;
- }
+out_err:
+ smiapp_unregistered(subdev);
- if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
- dev_err(&client->dev, "sysfs nvm entry failed\n");
- rval = -EBUSY;
- goto out_cleanup;
- }
- }
+ return rval;
+}
- /* We consider this as profile 0 sensor if any of these are zero. */
- if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
- !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
- } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
- != SMIAPP_SCALING_CAPABILITY_NONE) {
- if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
- == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
- else
- sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
- sensor->scaler = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
- == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
- sensor->scaler = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- }
- sensor->binner = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
- sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
- sensor->ssds_used++;
+static void smiapp_cleanup(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ device_remove_file(&client->dev, &dev_attr_nvm);
+ device_remove_file(&client->dev, &dev_attr_ident);
- /* prepare PLL configuration input values */
- pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
- pll->csi2.lanes = sensor->hwcfg->lanes;
- pll->ext_clk_freq_hz = sensor->hwcfg->ext_clk;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
- /* Profile 0 sensors have no separate OP clock branch. */
- if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
-
- for (i = 0; i < SMIAPP_SUBDEVS; i++) {
- struct {
- struct smiapp_subdev *ssd;
- char *name;
- } const __this[] = {
- { sensor->scaler, "scaler", },
- { sensor->binner, "binner", },
- { sensor->pixel_array, "pixel array", },
- }, *_this = &__this[i];
- struct smiapp_subdev *this = _this->ssd;
-
- if (!this)
- continue;
+ smiapp_free_controls(sensor);
+}
- if (this != sensor->src)
- v4l2_subdev_init(&this->sd, &smiapp_ops);
+static void smiapp_create_subdev(struct smiapp_sensor *sensor,
+ struct smiapp_subdev *ssd, const char *name,
+ unsigned short num_pads)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- this->sensor = sensor;
+ if (!ssd)
+ return;
- if (this == sensor->pixel_array) {
- this->npads = 1;
- } else {
- this->npads = 2;
- this->source_pad = 1;
- }
+ if (ssd != sensor->src)
+ v4l2_subdev_init(&ssd->sd, &smiapp_ops);
- snprintf(this->sd.name,
- sizeof(this->sd.name), "%s %s %d-%4.4x",
- sensor->minfo.name, _this->name,
- i2c_adapter_id(client->adapter), client->addr);
-
- this->sink_fmt.width =
- sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- this->sink_fmt.height =
- sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- this->compose.width = this->sink_fmt.width;
- this->compose.height = this->sink_fmt.height;
- this->crop[this->source_pad] = this->compose;
- this->pads[this->source_pad].flags = MEDIA_PAD_FL_SOURCE;
- if (this != sensor->pixel_array) {
- this->crop[this->sink_pad] = this->compose;
- this->pads[this->sink_pad].flags = MEDIA_PAD_FL_SINK;
- }
+ ssd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ssd->sensor = sensor;
- this->sd.entity.ops = &smiapp_entity_ops;
+ ssd->npads = num_pads;
+ ssd->source_pad = num_pads - 1;
- if (last == NULL) {
- last = this;
- continue;
- }
+ snprintf(ssd->sd.name,
+ sizeof(ssd->sd.name), "%s %s %d-%4.4x", sensor->minfo.name,
+ name, i2c_adapter_id(client->adapter), client->addr);
- this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- this->sd.internal_ops = &smiapp_internal_ops;
- this->sd.owner = THIS_MODULE;
- v4l2_set_subdevdata(&this->sd, client);
+ smiapp_get_native_size(ssd, &ssd->sink_fmt);
- last = this;
+ ssd->compose.width = ssd->sink_fmt.width;
+ ssd->compose.height = ssd->sink_fmt.height;
+ ssd->crop[ssd->source_pad] = ssd->compose;
+ ssd->pads[ssd->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ if (ssd != sensor->pixel_array) {
+ ssd->crop[ssd->sink_pad] = ssd->compose;
+ ssd->pads[ssd->sink_pad].flags = MEDIA_PAD_FL_SINK;
}
- dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+ ssd->sd.entity.ops = &smiapp_entity_ops;
- sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- /* final steps */
- smiapp_read_frame_fmt(sensor);
- rval = smiapp_init_controls(sensor);
- if (rval < 0)
- goto out_cleanup;
-
- rval = smiapp_call_quirk(sensor, init);
- if (rval)
- goto out_cleanup;
+ if (ssd == sensor->src)
+ return;
- rval = smiapp_get_mbus_formats(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_cleanup;
- }
-
- rval = smiapp_init_late_controls(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_cleanup;
- }
-
- mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
- mutex_unlock(&sensor->mutex);
- if (rval) {
- dev_err(&client->dev, "update mode failed\n");
- goto out_cleanup;
- }
-
- sensor->streaming = false;
- sensor->dev_init_done = true;
-
- smiapp_power_off(sensor);
-
- return 0;
-
-out_cleanup:
- smiapp_cleanup(sensor);
-
-out_power_off:
- smiapp_power_off(sensor);
- return rval;
-}
-
-static int smiapp_registered(struct v4l2_subdev *subdev)
-{
- struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
- int rval;
-
- if (!client->dev.of_node) {
- rval = smiapp_init(sensor);
- if (rval)
- return rval;
- }
-
- rval = smiapp_register_subdevs(sensor);
- if (rval)
- smiapp_cleanup(sensor);
-
- return rval;
+ ssd->sd.internal_ops = &smiapp_internal_ops;
+ ssd->sd.owner = THIS_MODULE;
+ ssd->sd.dev = &client->dev;
+ v4l2_set_subdevdata(&ssd->sd, client);
}
static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
struct smiapp_sensor *sensor = ssd->sensor;
- u32 mbus_code =
- smiapp_csi_data_formats[smiapp_pixel_order(sensor)].code;
unsigned int i;
+ int rval;
mutex_lock(&sensor->mutex);
for (i = 0; i < ssd->npads; i++) {
struct v4l2_mbus_framefmt *try_fmt =
v4l2_subdev_get_try_format(sd, fh->pad, i);
- struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, i);
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_get_try_crop(sd, fh->pad, i);
struct v4l2_rect *try_comp;
- try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
- try_fmt->height = sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
- try_fmt->code = mbus_code;
- try_fmt->field = V4L2_FIELD_NONE;
+ smiapp_get_native_size(ssd, try_crop);
- try_crop->top = 0;
- try_crop->left = 0;
- try_crop->width = try_fmt->width;
- try_crop->height = try_fmt->height;
+ try_fmt->width = try_crop->width;
+ try_fmt->height = try_crop->height;
+ try_fmt->code = sensor->internal_csi_format->code;
+ try_fmt->field = V4L2_FIELD_NONE;
if (ssd != sensor->pixel_array)
continue;
@@ -2862,12 +2674,23 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&sensor->mutex);
- return smiapp_set_power(sd, 1);
+ rval = pm_runtime_get_sync(sd->dev);
+ if (rval >= 0)
+ return 0;
+
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(sd->dev);
+ pm_runtime_put(sd->dev);
+
+ return rval;
}
static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- return smiapp_set_power(sd, 0);
+ pm_runtime_mark_last_busy(sd->dev);
+ pm_runtime_put_autosuspend(sd->dev);
+
+ return 0;
}
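
Open now takes a runtime PM reference and close releases it through the autosuspend path, so the sensor stays powered only while a file handle needs it. A reduced sketch of the pairing, with error handling trimmed to the minimum (the full patch additionally re-marks the device active on hard failures, as shown earlier):

#include <linux/pm_runtime.h>
#include <media/v4l2-subdev.h>

static int example_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	int ret = pm_runtime_get_sync(sd->dev);

	if (ret < 0) {
		pm_runtime_put(sd->dev);	/* drop the count taken above */
		return ret;
	}

	return 0;
}

static int example_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	pm_runtime_mark_last_busy(sd->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(sd->dev);	/* suspend after the delay, not at once */

	return 0;
}
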
static const struct v4l2_subdev_video_ops smiapp_video_ops = {
@@ -2904,6 +2727,7 @@ static const struct media_entity_operations smiapp_entity_ops = {
static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
.registered = smiapp_registered,
+ .unregistered = smiapp_unregistered,
.open = smiapp_open,
.close = smiapp_close,
};
@@ -2924,20 +2748,20 @@ static int smiapp_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- bool streaming;
-
- BUG_ON(mutex_is_locked(&sensor->mutex));
+ bool streaming = sensor->streaming;
+ int rval;
- if (sensor->power_count == 0)
- return 0;
+ rval = pm_runtime_get_sync(dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(dev);
+ return -EAGAIN;
+ }
if (sensor->streaming)
smiapp_stop_streaming(sensor);
- streaming = sensor->streaming;
-
- smiapp_power_off(sensor);
-
/* save state for resume */
sensor->streaming = streaming;
@@ -2949,14 +2773,9 @@ static int smiapp_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- int rval;
-
- if (sensor->power_count == 0)
- return 0;
+ int rval = 0;
- rval = smiapp_power_on(sensor);
- if (rval)
- return rval;
+ pm_runtime_put(dev);
if (sensor->streaming)
rval = smiapp_start_streaming(sensor);
@@ -3051,6 +2870,7 @@ static int smiapp_probe(struct i2c_client *client,
{
struct smiapp_sensor *sensor;
struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
+ unsigned int i;
int rval;
if (hwcfg == NULL)
@@ -3062,35 +2882,240 @@ static int smiapp_probe(struct i2c_client *client,
sensor->hwcfg = hwcfg;
mutex_init(&sensor->mutex);
- mutex_init(&sensor->power_mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
sensor->src->sd.internal_ops = &smiapp_internal_src_ops;
- sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- sensor->src->sensor = sensor;
- sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
- rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
- sensor->src->pads);
- if (rval < 0)
+ sensor->vana = devm_regulator_get(&client->dev, "vana");
+ if (IS_ERR(sensor->vana)) {
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return PTR_ERR(sensor->vana);
+ }
+
+ sensor->ext_clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock (%ld)\n",
+ PTR_ERR(sensor->ext_clk));
+ return -EPROBE_DEFER;
+ }
+
+ rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
return rval;
+ }
- if (client->dev.of_node) {
- rval = smiapp_init(sensor);
- if (rval)
- goto out_media_entity_cleanup;
+ sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->xshutdown))
+ return PTR_ERR(sensor->xshutdown);
+
+ pm_runtime_enable(&client->dev);
+
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_identify_module(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_get_all_limits(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
}
+ rval = smiapp_read_frame_fmt(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ /*
+ * Handle Sensor Module orientation on the board.
+ *
+ * The application of H-FLIP and V-FLIP on the sensor is modified by
+ * the sensor orientation on the board.
+ *
+ * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
+ * both H-FLIP and V-FLIP for normal operation which also implies
+ * that a set/unset operation for user space HFLIP and VFLIP v4l2
+ * controls will need to be internally inverted.
+ *
+ * Rotation also changes the bayer pattern.
+ */
+ if (sensor->hwcfg->module_board_orient ==
+ SMIAPP_MODULE_BOARD_ORIENT_180)
+ sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
+ SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ rval = smiapp_call_quirk(sensor, limits);
+ if (rval) {
+ dev_err(&client->dev, "limits quirks failed\n");
+ goto out_power_off;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+ u32 val;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->nbinning_subtypes = min_t(u8, val,
+ SMIAPP_BINNING_SUBTYPES);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->binning_subtypes[i] =
+ *(struct smiapp_binning_subtype *)&val;
+
+ dev_dbg(&client->dev, "binning %xx%x\n",
+ sensor->binning_subtypes[i].horizontal,
+ sensor->binning_subtypes[i].vertical);
+ }
+ }
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+
+ if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
+ dev_err(&client->dev, "sysfs ident entry creation failed\n");
+ rval = -ENOENT;
+ goto out_power_off;
+ }
+ /* SMIA++ NVM initialization - it will be read from the sensor
+ * when it is first requested by userspace.
+ */
+ if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
+ sensor->nvm = devm_kzalloc(&client->dev,
+ sensor->hwcfg->nvm_size, GFP_KERNEL);
+ if (sensor->nvm == NULL) {
+ rval = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
+ dev_err(&client->dev, "sysfs nvm entry failed\n");
+ rval = -EBUSY;
+ goto out_cleanup;
+ }
+ }
+
+ /* We consider this as profile 0 sensor if any of these are zero. */
+ if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
+ } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
+ else
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ }
+ sensor->binner = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+
+ sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
+ /* prepare PLL configuration input values */
+ sensor->pll.bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ sensor->pll.csi2.lanes = sensor->hwcfg->lanes;
+ sensor->pll.ext_clk_freq_hz = sensor->hwcfg->ext_clk;
+ sensor->pll.scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ sensor->pll.flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+
+ smiapp_create_subdev(sensor, sensor->scaler, "scaler", 2);
+ smiapp_create_subdev(sensor, sensor->binner, "binner", 2);
+ smiapp_create_subdev(sensor, sensor->pixel_array, "pixel_array", 1);
+
+ dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+
+ sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ rval = smiapp_init_controls(sensor);
+ if (rval < 0)
+ goto out_cleanup;
+
+ rval = smiapp_call_quirk(sensor, init);
+ if (rval)
+ goto out_cleanup;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ rval = smiapp_init_late_controls(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_cleanup;
+ }
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_update_mode(sensor);
+ mutex_unlock(&sensor->mutex);
+ if (rval) {
+ dev_err(&client->dev, "update mode failed\n");
+ goto out_cleanup;
+ }
+
+ sensor->streaming = false;
+ sensor->dev_init_done = true;
+
+ rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
+ sensor->src->pads);
+ if (rval < 0)
+ goto out_media_entity_cleanup;
+
rval = v4l2_async_register_subdev(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
return 0;
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
+out_cleanup:
+ smiapp_cleanup(sensor);
+
+out_power_off:
+ pm_runtime_put(&client->dev);
+ pm_runtime_disable(&client->dev);
+
return rval;
}
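
probe() above ends with the usual runtime PM bring-up: enable runtime PM, take a synchronous reference for the hardware-touching part of probe, then hand control to autosuspend with a 1000 ms delay. A condensed sketch of just that sequence; init_hw() is a made-up stand-in for the identification, limits and control setup done in the real probe:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int init_hw(struct device *dev)
{
	return 0;			/* stand-in for sensor initialisation */
}

static int example_probe_pm(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);	/* powers the device up via runtime resume */
	if (ret < 0)
		goto out_put;

	ret = init_hw(dev);		/* talk to the now-powered device */
	if (ret)
		goto out_put;

	pm_runtime_set_autosuspend_delay(dev, 1000);	/* 1 s idle timeout */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_autosuspend(dev);	/* balance get_sync(), allow autosuspend */

	return 0;

out_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}
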
@@ -3102,11 +3127,8 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
- if (sensor->power_count) {
- gpiod_set_value(sensor->xshutdown, 0);
- clk_disable_unprepare(sensor->ext_clk);
- sensor->power_count = 0;
- }
+ pm_runtime_suspend(&client->dev);
+ pm_runtime_disable(&client->dev);
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
@@ -3130,8 +3152,8 @@ static const struct i2c_device_id smiapp_id_table[] = {
MODULE_DEVICE_TABLE(i2c, smiapp_id_table);
static const struct dev_pm_ops smiapp_pm_ops = {
- .suspend = smiapp_suspend,
- .resume = smiapp_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(smiapp_suspend, smiapp_resume)
+ SET_RUNTIME_PM_OPS(smiapp_power_off, smiapp_power_on, NULL)
};
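
The dev_pm_ops table now covers both system sleep and runtime PM through the standard macros, reusing smiapp_power_off/on as the runtime callbacks. The shape of such a table for a hypothetical driver is sketched below; the callbacks are stubs, and __maybe_unused keeps them warning-free when CONFIG_PM is disabled and the macros expand to nothing:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_resume(struct device *dev) { return 0; }
static int __maybe_unused example_runtime_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};
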
static struct i2c_driver smiapp_i2c_driver = {
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 1e501c06d18c..d6779e35d36f 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -268,8 +268,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
if (r == 1) {
if (retries)
dev_err(&client->dev,
- "sensor i2c stall encountered. "
- "retries: %d\n", retries);
+ "sensor i2c stall encountered. retries: %d\n",
+ retries);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index aae72bc87bf7..f74d695018b9 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -150,11 +150,6 @@ struct smiapp_csi_data_format {
#define SMIAPP_PAD_SRC 1
#define SMIAPP_PADS 2
-#define SMIAPP_COMPRESSED_BASE 8
-#define SMIAPP_COMPRESSED_MAX 16
-#define SMIAPP_NR_OF_COMPRESSED (SMIAPP_COMPRESSED_MAX - \
- SMIAPP_COMPRESSED_BASE + 1)
-
struct smiapp_binning_subtype {
u8 horizontal:4;
u8 vertical:4;
@@ -162,9 +157,9 @@ struct smiapp_binning_subtype {
struct smiapp_subdev {
struct v4l2_subdev sd;
- struct media_pad pads[2];
+ struct media_pad pads[SMIAPP_PADS];
struct v4l2_rect sink_fmt;
- struct v4l2_rect crop[2];
+ struct v4l2_rect crop[SMIAPP_PADS];
struct v4l2_rect compose; /* compose on sink */
unsigned short sink_pad;
unsigned short source_pad;
@@ -181,16 +176,9 @@ struct smiapp_sensor {
* "mutex" is used to serialise access to all fields here
* except v4l2_ctrls at the end of the struct. "mutex" is also
* used to serialise access to file handle specific
- * information. The exception to this rule is the power_mutex
- * below.
+ * information.
*/
struct mutex mutex;
- /*
- * power_mutex is used to serialise power management related
- * activities. Acquiring "mutex" at that time isn't necessary
- * since there are no other users anyway.
- */
- struct mutex power_mutex;
struct smiapp_subdev ssds[SMIAPP_SUBDEVS];
u32 ssds_used;
struct smiapp_subdev *src;
@@ -218,12 +206,14 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
- u16 image_start; /* Offset to first line after metadata lines */
-
- int power_count;
+ u16 embedded_start; /* embedded data start line */
+ u16 embedded_end;
+ u16 image_start; /* image data start line */
+ u16 visible_pixel_start; /* start pixel of the visible image */
bool streaming;
bool dev_init_done;
+ u8 compressed_min_bpp;
u8 *nvm; /* nvm memory buffer */
unsigned int nvm_size; /* bytes */
@@ -233,7 +223,7 @@ struct smiapp_sensor {
struct smiapp_pll pll;
/* Is a default format supported for a given BPP? */
- unsigned long valid_link_freqs[SMIAPP_NR_OF_COMPRESSED];
+ unsigned long *valid_link_freqs;
/* Pixel array controls */
struct v4l2_ctrl *analog_gain;
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762b3a4b..985a3672b243 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -1064,8 +1064,7 @@ static int ov772x_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
- "I2C-Adapter doesn't support "
- "I2C_FUNC_SMBUS_BYTE_DATA\n");
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
return -EIO;
}
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 0da632d7d33a..f11f76cdacad 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -881,8 +881,7 @@ static int ov9740_video_probe(struct i2c_client *client)
goto done;
}
- dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, "
- "Manufacturer 0x%02x, SMIA Version 0x%02x\n",
+ dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, Manufacturer 0x%02x, SMIA Version 0x%02x\n",
priv->model, priv->revision, priv->manid, priv->smiaver);
ret = v4l2_ctrl_handler_setup(&priv->hdl);
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 4002c07f3857..c9c49ed707b8 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -947,8 +947,7 @@ static int tw9910_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
- "I2C-Adapter doesn't support "
- "I2C_FUNC_SMBUS_BYTE_DATA\n");
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
return -EIO;
}
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index 2e06c06cac9b..cc6104da34ef 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -27,7 +27,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 42d1e26e581c..ce86534450ac 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -1894,8 +1894,9 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n");
printk(KERN_INFO "tvaudio: known chips: ");
for (desc = chiplist; desc->name != NULL; desc++)
- printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name);
- printk("\n");
+ printk(KERN_CONT "%s%s",
+ (desc == chiplist) ? "" : ", ", desc->name);
+ printk(KERN_CONT "\n");
}
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index d5c9347f4c6d..0c62899c3667 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -894,7 +894,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
if (index != 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
@@ -922,7 +922,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
return 0;
}
- format->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+ format->format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format->format.width = tvp514x_std_list[decoder->current_std].width;
format->format.height = tvp514x_std_list[decoder->current_std].height;
format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -946,7 +946,7 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
struct tvp514x_decoder *decoder = to_decoder(sd);
if (fmt->format.field != V4L2_FIELD_INTERLACED ||
- fmt->format.code != MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->format.code != MEDIA_BUS_FMT_UYVY8_2X8 ||
fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M ||
fmt->format.width != tvp514x_std_list[decoder->current_std].width ||
fmt->format.height != tvp514x_std_list[decoder->current_std].height)
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 4740da39d698..3a0fe8cc64e9 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -36,6 +36,8 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
+#define dprintk0(__dev, __arg...) dev_dbg_lvl(__dev, 0, 0, __arg)
+
struct tvp5150 {
struct v4l2_subdev sd;
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -74,11 +76,11 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
rc = i2c_smbus_read_byte_data(c, addr);
if (rc < 0) {
- v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+ dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
return rc;
}
- v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, rc);
+ dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: read 0x%02x = %02x\n", addr, rc);
return rc;
}
@@ -89,10 +91,10 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
struct i2c_client *c = v4l2_get_subdevdata(sd);
int rc;
- v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", addr, value);
+ dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: writing %02x %02x\n", addr, value);
rc = i2c_smbus_write_byte_data(c, addr, value);
if (rc < 0)
- v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+ dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
return rc;
}
@@ -100,138 +102,140 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
const u8 end, int max_line)
{
- int i = 0;
+ u8 buf[16];
+ int i = 0, j, len;
- while (init != (u8)(end + 1)) {
- if ((i % max_line) == 0) {
- if (i > 0)
- printk("\n");
- printk("tvp5150: %s reg 0x%02x = ", s, init);
- }
- printk("%02x ", tvp5150_read(sd, init));
+ if (max_line > 16) {
+ dprintk0(sd->dev, "too much data to dump\n");
+ return;
+ }
+
+ for (i = init; i < end; i += max_line) {
+ len = (end - i > max_line) ? max_line : end - i;
+
+ for (j = 0; j < len; j++)
+ buf[j] = tvp5150_read(sd, i + j);
- init++;
- i++;
+ dprintk0(sd->dev, "%s reg %02x = %*ph\n", s, i, len, buf);
}
- printk("\n");
}
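
The rewritten dump_reg_range() above batches register reads into a small buffer and prints it with the kernel's %*ph extension, which dumps a short buffer (documented for up to 64 bytes) as hex in a single call instead of one printk per byte. A standalone sketch of that format specifier, with an arbitrary buffer:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void example_dump(struct device *dev)
{
	u8 buf[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };

	/* "%*ph" takes an int length followed by a pointer to the bytes. */
	dev_dbg(dev, "regs = %*ph\n", (int)sizeof(buf), buf);
}
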
static int tvp5150_log_status(struct v4l2_subdev *sd)
{
- printk("tvp5150: Video input source selection #1 = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
- printk("tvp5150: Analog channel controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
- printk("tvp5150: Operation mode controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_OP_MODE_CTL));
- printk("tvp5150: Miscellaneous controls = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MISC_CTL));
- printk("tvp5150: Autoswitch mask= 0x%02x\n",
- tvp5150_read(sd, TVP5150_AUTOSW_MSK));
- printk("tvp5150: Color killer threshold control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
- printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
- tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
- printk("tvp5150: Brightness control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_BRIGHT_CTL));
- printk("tvp5150: Color saturation control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_SATURATION_CTL));
- printk("tvp5150: Hue control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_HUE_CTL));
- printk("tvp5150: Contrast control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_CONTRAST_CTL));
- printk("tvp5150: Outputs and data rates select = 0x%02x\n",
- tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
- printk("tvp5150: Configuration shared pins = 0x%02x\n",
- tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
- printk("tvp5150: Active video cropping start = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
- printk("tvp5150: Active video cropping stop = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
- tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
- printk("tvp5150: Genlock/RTC = 0x%02x\n",
- tvp5150_read(sd, TVP5150_GENLOCK));
- printk("tvp5150: Horizontal sync start = 0x%02x\n",
- tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
- printk("tvp5150: Vertical blanking start = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
- printk("tvp5150: Vertical blanking stop = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
- printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
- tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
- tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
- printk("tvp5150: Interrupt reset register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
- printk("tvp5150: Interrupt enable register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
- printk("tvp5150: Interrupt configuration register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
- printk("tvp5150: Video standard = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VIDEO_STD));
- printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
- tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
- tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
- printk("tvp5150: Macrovision on counter = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
- printk("tvp5150: Macrovision off counter = 0x%02x\n",
- tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
- printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
- (tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
- printk("tvp5150: Device ID = %02x%02x\n",
- tvp5150_read(sd, TVP5150_MSB_DEV_ID),
- tvp5150_read(sd, TVP5150_LSB_DEV_ID));
- printk("tvp5150: ROM version = (hex) %02x.%02x\n",
- tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
- tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
- printk("tvp5150: Vertical line count = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
- tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
- printk("tvp5150: Interrupt status register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
- printk("tvp5150: Interrupt active register B = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
- printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
- tvp5150_read(sd, TVP5150_STATUS_REG_1),
- tvp5150_read(sd, TVP5150_STATUS_REG_2),
- tvp5150_read(sd, TVP5150_STATUS_REG_3),
- tvp5150_read(sd, TVP5150_STATUS_REG_4),
- tvp5150_read(sd, TVP5150_STATUS_REG_5));
+ dprintk0(sd->dev, "tvp5150: Video input source selection #1 = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
+ dprintk0(sd->dev, "tvp5150: Analog channel controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
+ dprintk0(sd->dev, "tvp5150: Operation mode controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_OP_MODE_CTL));
+ dprintk0(sd->dev, "tvp5150: Miscellaneous controls = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MISC_CTL));
+ dprintk0(sd->dev, "tvp5150: Autoswitch mask= 0x%02x\n",
+ tvp5150_read(sd, TVP5150_AUTOSW_MSK));
+ dprintk0(sd->dev, "tvp5150: Color killer threshold control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
+ dprintk0(sd->dev, "tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
+ tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
+ dprintk0(sd->dev, "tvp5150: Brightness control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_BRIGHT_CTL));
+ dprintk0(sd->dev, "tvp5150: Color saturation control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_SATURATION_CTL));
+ dprintk0(sd->dev, "tvp5150: Hue control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_HUE_CTL));
+ dprintk0(sd->dev, "tvp5150: Contrast control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_CONTRAST_CTL));
+ dprintk0(sd->dev, "tvp5150: Outputs and data rates select = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
+ dprintk0(sd->dev, "tvp5150: Configuration shared pins = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
+ dprintk0(sd->dev, "tvp5150: Active video cropping start = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
+ dprintk0(sd->dev, "tvp5150: Active video cropping stop = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
+ tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
+ dprintk0(sd->dev, "tvp5150: Genlock/RTC = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_GENLOCK));
+ dprintk0(sd->dev, "tvp5150: Horizontal sync start = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
+ dprintk0(sd->dev, "tvp5150: Vertical blanking start = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
+ dprintk0(sd->dev, "tvp5150: Vertical blanking stop = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
+ dprintk0(sd->dev, "tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
+ tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
+ tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
+ dprintk0(sd->dev, "tvp5150: Interrupt reset register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt enable register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt configuration register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
+ dprintk0(sd->dev, "tvp5150: Video standard = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VIDEO_STD));
+ dprintk0(sd->dev, "tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
+ tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
+ tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
+ dprintk0(sd->dev, "tvp5150: Macrovision on counter = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
+ dprintk0(sd->dev, "tvp5150: Macrovision off counter = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
+ dprintk0(sd->dev, "tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
+ (tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
+ dprintk0(sd->dev, "tvp5150: Device ID = %02x%02x\n",
+ tvp5150_read(sd, TVP5150_MSB_DEV_ID),
+ tvp5150_read(sd, TVP5150_LSB_DEV_ID));
+ dprintk0(sd->dev, "tvp5150: ROM version = (hex) %02x.%02x\n",
+ tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
+ tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
+ dprintk0(sd->dev, "tvp5150: Vertical line count = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
+ tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
+ dprintk0(sd->dev, "tvp5150: Interrupt status register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
+ dprintk0(sd->dev, "tvp5150: Interrupt active register B = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
+ dprintk0(sd->dev, "tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
+ tvp5150_read(sd, TVP5150_STATUS_REG_1),
+ tvp5150_read(sd, TVP5150_STATUS_REG_2),
+ tvp5150_read(sd, TVP5150_STATUS_REG_3),
+ tvp5150_read(sd, TVP5150_STATUS_REG_4),
+ tvp5150_read(sd, TVP5150_STATUS_REG_5));
dump_reg_range(sd, "Teletext filter 1", TVP5150_TELETEXT_FIL1_INI,
TVP5150_TELETEXT_FIL1_END, 8);
dump_reg_range(sd, "Teletext filter 2", TVP5150_TELETEXT_FIL2_INI,
TVP5150_TELETEXT_FIL2_END, 8);
- printk("tvp5150: Teletext filter enable = 0x%02x\n",
- tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
- printk("tvp5150: Interrupt status register A = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
- printk("tvp5150: Interrupt enable register A = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
- printk("tvp5150: Interrupt configuration = 0x%02x\n",
- tvp5150_read(sd, TVP5150_INT_CONF));
- printk("tvp5150: VDP status register = 0x%02x\n",
- tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
- printk("tvp5150: FIFO word count = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
- printk("tvp5150: FIFO interrupt threshold = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
- printk("tvp5150: FIFO reset = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_RESET));
- printk("tvp5150: Line number interrupt = 0x%02x\n",
- tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
- printk("tvp5150: Pixel alignment register = 0x%02x%02x\n",
- tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
- tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
- printk("tvp5150: FIFO output control = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
- printk("tvp5150: Full field enable = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
- printk("tvp5150: Full field mode register = 0x%02x\n",
- tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
+ dprintk0(sd->dev, "tvp5150: Teletext filter enable = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
+ dprintk0(sd->dev, "tvp5150: Interrupt status register A = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
+ dprintk0(sd->dev, "tvp5150: Interrupt enable register A = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
+ dprintk0(sd->dev, "tvp5150: Interrupt configuration = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_INT_CONF));
+ dprintk0(sd->dev, "tvp5150: VDP status register = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
+ dprintk0(sd->dev, "tvp5150: FIFO word count = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
+ dprintk0(sd->dev, "tvp5150: FIFO interrupt threshold = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
+ dprintk0(sd->dev, "tvp5150: FIFO reset = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_RESET));
+ dprintk0(sd->dev, "tvp5150: Line number interrupt = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
+ dprintk0(sd->dev, "tvp5150: Pixel alignment register = 0x%02x%02x\n",
+ tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
+ tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
+ dprintk0(sd->dev, "tvp5150: FIFO output control = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
+ dprintk0(sd->dev, "tvp5150: Full field enable = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
+ dprintk0(sd->dev, "tvp5150: Full field mode register = 0x%02x\n",
+ tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
dump_reg_range(sd, "CC data", TVP5150_CC_DATA_INI,
TVP5150_CC_DATA_END, 8);
@@ -254,7 +258,7 @@ static int tvp5150_log_status(struct v4l2_subdev *sd)
Basic functions
****************************************************************************/
-static inline void tvp5150_selmux(struct v4l2_subdev *sd)
+static void tvp5150_selmux(struct v4l2_subdev *sd)
{
int opmode = 0;
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -280,8 +284,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
break;
}
- v4l2_dbg(1, debug, sd, "Selecting video route: route input=%i, output=%i "
- "=> tvp5150 input=%i, opmode=%i\n",
+ dev_dbg_lvl(sd->dev, 1, debug, "Selecting video route: route input=%i, output=%i => tvp5150 input=%i, opmode=%i\n",
decoder->input, decoder->output,
input, opmode);
@@ -293,7 +296,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n", __func__, val);
+ dev_err(sd->dev, "%s: failed with error = %d\n", __func__, val);
return;
}
@@ -611,7 +614,7 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
const struct i2c_vbi_ram_value *regs = vbi_ram_default;
int line;
- v4l2_dbg(1, debug, sd, "g_sliced_vbi_cap\n");
+ dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
memset(cap, 0, sizeof *cap);
while (regs->reg != (u16)-1 ) {
@@ -649,7 +652,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
int pos=0;
if (std == V4L2_STD_ALL) {
- v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+ dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
return 0;
} else if (std & V4L2_STD_625_50) {
/* Don't follow NTSC Line number convension */
@@ -697,7 +700,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
int i, ret = 0;
if (std == V4L2_STD_ALL) {
- v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+ dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
return 0;
} else if (std & V4L2_STD_625_50) {
/* Don't follow NTSC Line number convension */
@@ -712,7 +715,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
for (i = 0; i <= 1; i++) {
ret = tvp5150_read(sd, reg + i);
if (ret < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n",
+ dev_err(sd->dev, "%s: failed with error = %d\n",
__func__, ret);
return 0;
}
@@ -749,7 +752,7 @@ static int tvp5150_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
fmt = VIDEO_STD_SECAM_BIT;
}
- v4l2_dbg(1, debug, sd, "Set video std register to %d.\n", fmt);
+ dev_dbg_lvl(sd->dev, 1, debug, "Set video std register to %d.\n", fmt);
tvp5150_write(sd, TVP5150_VIDEO_STD, fmt);
return 0;
}
@@ -815,6 +818,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
case V4L2_CID_HUE:
tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN:
decoder->enable = ctrl->val ? false : true;
tvp5150_selmux(sd);
@@ -866,7 +870,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f->field = V4L2_FIELD_ALTERNATE;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
- v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
+ dev_dbg_lvl(sd->dev, 1, debug, "width = %d, height = %d\n", f->width,
f->height);
return 0;
}
@@ -884,7 +888,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
+ dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
/* tvp5150 has some special limits */
@@ -1010,11 +1014,11 @@ static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
Media entity ops
****************************************************************************/
+#ifdef CONFIG_MEDIA_CONTROLLER
static int tvp5150_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
-#ifdef CONFIG_MEDIA_CONTROLLER
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct tvp5150 *decoder = to_tvp5150(sd);
int i;
@@ -1031,7 +1035,6 @@ static int tvp5150_link_setup(struct media_entity *entity,
decoder->input = i;
tvp5150_selmux(sd);
-#endif
return 0;
}
@@ -1039,6 +1042,7 @@ static int tvp5150_link_setup(struct media_entity *entity,
static const struct media_entity_operations tvp5150_sd_media_ops = {
.link_setup = tvp5150_link_setup,
};
+#endif
/****************************************************************************
I2C Command
@@ -1148,7 +1152,7 @@ static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
res = tvp5150_read(sd, reg->reg & 0xff);
if (res < 0) {
- v4l2_err(sd, "%s: failed with error = %d\n", __func__, res);
+ dev_err(sd->dev, "%s: failed with error = %d\n", __func__, res);
return res;
}
@@ -1288,21 +1292,21 @@ static int tvp5150_detect_version(struct tvp5150 *core)
core->dev_id = (regs[0] << 8) | regs[1];
core->rom_ver = (regs[2] << 8) | regs[3];
- v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+ dev_info(sd->dev, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
core->dev_id, regs[2], regs[3], c->addr << 1,
c->adapter->name);
if (core->dev_id == 0x5150 && core->rom_ver == 0x0321) {
- v4l2_info(sd, "tvp5150a detected.\n");
+ dev_info(sd->dev, "tvp5150a detected.\n");
} else if (core->dev_id == 0x5150 && core->rom_ver == 0x0400) {
- v4l2_info(sd, "tvp5150am1 detected.\n");
+ dev_info(sd->dev, "tvp5150am1 detected.\n");
/* ITU-T BT.656.4 timing */
tvp5150_write(sd, TVP5150_REV_SELECT, 0);
} else if (core->dev_id == 0x5151 && core->rom_ver == 0x0100) {
- v4l2_info(sd, "tvp5151 detected.\n");
+ dev_info(sd->dev, "tvp5151 detected.\n");
} else {
- v4l2_info(sd, "*** unknown tvp%04x chip detected.\n",
+ dev_info(sd->dev, "*** unknown tvp%04x chip detected.\n",
core->dev_id);
}
@@ -1381,7 +1385,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
for_each_available_child_of_node(connectors, child) {
ret = of_property_read_u32(child, "input", &input_type);
if (ret) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"missing type property in node %s\n",
child->name);
goto err_connector;
@@ -1396,7 +1400,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
/* Each input connector can only be defined once */
if (input->name) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"input %s with same type already exists\n",
input->name);
ret = -EINVAL;
@@ -1417,7 +1421,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
ret = of_property_read_string(child, "label", &name);
if (ret < 0) {
- v4l2_err(&decoder->sd,
+ dev_err(decoder->sd.dev,
"missing label property in node %s\n",
child->name);
goto err_connector;
@@ -1465,7 +1469,7 @@ static int tvp5150_probe(struct i2c_client *c,
if (IS_ENABLED(CONFIG_OF) && np) {
res = tvp5150_parse_dt(core, np);
if (res) {
- v4l2_err(sd, "DT parsing error: %d\n", res);
+ dev_err(sd->dev, "DT parsing error: %d\n", res);
return res;
}
} else {
@@ -1549,7 +1553,7 @@ static int tvp5150_remove(struct i2c_client *c)
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp5150 *decoder = to_tvp5150(sd);
- v4l2_dbg(1, debug, sd,
+ dev_dbg_lvl(sd->dev, 1, debug,
"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
c->addr << 1);
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index d6c23bdbcd4a..ef0d8b8e3df7 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 90b693f4e2ab..ce9f09370e22 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -23,7 +23,7 @@
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index f086e5e6e844..c885def54b15 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 5581f4db02af..45039d756753 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -29,7 +29,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 2783531f9fc0..8756275e9fc4 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -801,9 +801,13 @@ void media_device_unregister(struct media_device *mdev)
/* Remove all interfaces from the media device */
list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
graph_obj.list) {
+ /*
+ * Unlink the interface, but don't free it here; the
+ * module which created it is responsible for freeing
+ * it
+ */
__media_remove_intf_links(intf);
media_gobj_destroy(&intf->graph_obj);
- kfree(intf);
}
mutex_unlock(&mdev->graph_mutex);
@@ -817,32 +821,6 @@ void media_device_unregister(struct media_device *mdev)
}
EXPORT_SYMBOL_GPL(media_device_unregister);
-static void media_device_release_devres(struct device *dev, void *res)
-{
-}
-
-struct media_device *media_device_get_devres(struct device *dev)
-{
- struct media_device *mdev;
-
- mdev = devres_find(dev, media_device_release_devres, NULL, NULL);
- if (mdev)
- return mdev;
-
- mdev = devres_alloc(media_device_release_devres,
- sizeof(struct media_device), GFP_KERNEL);
- if (!mdev)
- return NULL;
- return devres_get(dev, mdev, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_get_devres);
-
-struct media_device *media_device_find_devres(struct device *dev)
-{
- return devres_find(dev, media_device_release_devres, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_find_devres);
-
#if IS_ENABLED(CONFIG_PCI)
void media_device_pci_init(struct media_device *mdev,
struct pci_dev *pci_dev,
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index c68239e60487..f9f723f5e4f0 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -205,10 +205,16 @@ void media_gobj_destroy(struct media_gobj *gobj)
{
dev_dbg_obj(__func__, gobj);
+ /* Do nothing if the object is not linked. */
+ if (gobj->mdev == NULL)
+ return;
+
gobj->mdev->topology_version++;
/* Remove the object from mdev list */
list_del(&gobj->list);
+
+ gobj->mdev = NULL;
}
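
The hunk above makes media_gobj_destroy() safe to call more than once: it returns early when the object is no longer linked to a media_device and clears gobj->mdev after unlinking. The same guard-and-clear idiom, reduced to its essentials with generic, made-up struct names:

#include <linux/list.h>

struct owner;

struct node {
	struct owner *owner;		/* NULL once unlinked */
	struct list_head list;
};

/* Safe to call repeatedly: the second and later calls are no-ops. */
static void node_unlink(struct node *n)
{
	if (!n->owner)
		return;

	list_del(&n->list);
	n->owner = NULL;
}
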
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index 2881e0d956ad..913dc97f8b49 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -57,8 +57,7 @@ int flexcop_dma_config(struct flexcop_device *fc,
fc->write_ibi_reg(fc,dma2_014,v0x4);
fc->write_ibi_reg(fc,dma2_01c,v0xc);
} else {
- err("either DMA1 or DMA2 can be configured within one "
- "flexcop_dma_config call.");
+ err("either DMA1 or DMA2 can be configured within one flexcop_dma_config call.");
return -EINVAL;
}
@@ -82,8 +81,7 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
r0x0 = dma2_010;
r0xc = dma2_01c;
} else {
- err("either transfer DMA1 or DMA2 can be started within one "
- "flexcop_dma_xfer_control call.");
+ err("either transfer DMA1 or DMA2 can be started within one flexcop_dma_xfer_control call.");
return -EINVAL;
}
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 4cac1fc233f2..99ce28442a75 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -185,8 +185,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
- deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, "
- "last_cur_pos: %08x ",
+ deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
jiffies_to_usecs(jiffies - fc_pci->last_irq),
v.raw, (unsigned long long)cur_addr, cur_pos,
fc_pci->last_dma1_cur_pos);
@@ -220,8 +219,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
fc_pci->last_dma1_cur_pos = cur_pos;
fc_pci->count++;
} else {
- deb_irq("isr for flexcop called, "
- "apparently without reason (%08x)\n", v.raw);
+ deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
+ v.raw);
ret = IRQ_NONE;
}
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 57c7f58c3af2..70bdf93fc020 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -22,6 +22,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -36,6 +38,13 @@ static unsigned int btcx_debug;
module_param(btcx_debug, int, 0644);
MODULE_PARM_DESC(btcx_debug,"debug messages, default is 0 (no)");
+#define dprintk(fmt, arg...) do { \
+ if (btcx_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
+
/* ---------------------------------------------------------- */
/* allocate/free risc memory */
@@ -46,11 +55,11 @@ void btcx_riscmem_free(struct pci_dev *pci,
{
if (NULL == risc->cpu)
return;
- if (btcx_debug) {
- memcnt--;
- printk("btcx: riscmem free [%d] dma=%lx\n",
- memcnt, (unsigned long)risc->dma);
- }
+
+ memcnt--;
+ dprintk("btcx: riscmem free [%d] dma=%lx\n",
+ memcnt, (unsigned long)risc->dma);
+
pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
memset(risc,0,sizeof(*risc));
}
@@ -71,11 +80,10 @@ int btcx_riscmem_alloc(struct pci_dev *pci,
risc->cpu = cpu;
risc->dma = dma;
risc->size = size;
- if (btcx_debug) {
- memcnt++;
- printk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
- memcnt, (unsigned long)dma, cpu, size);
- }
+
+ memcnt++;
+ dprintk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
+ memcnt, (unsigned long)dma, cpu, size);
}
memset(risc->cpu,0,risc->size);
return 0;
@@ -137,9 +145,8 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
dx = nx - win->left;
win->left = nx;
win->width = nw;
- if (btcx_debug)
- printk(KERN_DEBUG "btcx: window align %dx%d+%d+%d [dx=%d]\n",
- win->width, win->height, win->left, win->top, dx);
+ dprintk("btcx: window align %dx%d+%d+%d [dx=%d]\n",
+ win->width, win->height, win->left, win->top, dx);
/* fixup clips */
for (i = 0; i < n; i++) {
@@ -149,10 +156,9 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
nw += mask+1;
clips[i].c.left = nx;
clips[i].c.width = nw;
- if (btcx_debug)
- printk(KERN_DEBUG "btcx: clip align %dx%d+%d+%d\n",
- clips[i].c.width, clips[i].c.height,
- clips[i].c.left, clips[i].c.top);
+ dprintk("btcx: clip align %dx%d+%d+%d\n",
+ clips[i].c.width, clips[i].c.height,
+ clips[i].c.left, clips[i].c.top);
}
return 0;
}
@@ -228,10 +234,10 @@ btcx_calc_skips(int line, int width, int *maxy,
*maxy = maxline;
if (btcx_debug) {
- printk(KERN_DEBUG "btcx: skips line %d-%d:",line,maxline);
+ dprintk("btcx: skips line %d-%d:", line, maxline);
for (skip = 0; skip < *nskips; skip++) {
- printk(" %d-%d",skips[skip].start,skips[skip].end);
+ pr_cont(" %d-%d", skips[skip].start, skips[skip].end);
}
- printk("\n");
+ pr_cont("\n");
}
}
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 8a17cc0bfa07..a1b0f3193bc0 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -125,10 +125,8 @@ module_param_array(remote, int, NULL, 0444);
module_param_array(audiodev, int, NULL, 0444);
module_param_array(audiomux, int, NULL, 0444);
-MODULE_PARM_DESC(triton1,"set ETBF pci config bit "
- "[enable bug compatibility for triton1 + others]");
-MODULE_PARM_DESC(vsfx,"set VSFX pci config bit "
- "[yet another chipset flaw workaround]");
+MODULE_PARM_DESC(triton1, "set ETBF pci config bit [enable bug compatibility for triton1 + others]");
+MODULE_PARM_DESC(vsfx, "set VSFX pci config bit [yet another chipset flaw workaround]");
MODULE_PARM_DESC(latency,"pci latency timer");
MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a list");
MODULE_PARM_DESC(pll, "specify installed crystal (0=none, 28=28 MHz, 35=35 MHz, 14=14 MHz)");
@@ -141,8 +139,7 @@ MODULE_PARM_DESC(audiodev, "specify audio device:\n"
"\t\t 2 = tda7432\n"
"\t\t 3 = tvaudio");
MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition.");
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
- " [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
/* ----------------------------------------------------------------------- */
/* list of card IDs for bt878+ cards */
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 97b91a9f9fa9..fb4aefbcc8f8 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(irq_debug, "irq handler debug messages, default is 0 (no)");
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
MODULE_PARM_DESC(gbuffers, "number of capture buffers. range 2-32, default 8");
MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 0x208000");
-MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default "
- "is 1 (yes) for compatibility with older applications");
+MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default is 1 (yes) for compatibility with older applications");
MODULE_PARM_DESC(automute, "mute audio on bad/missing video signal, default is 1 (yes)");
MODULE_PARM_DESC(chroma_agc, "enables the AGC of chroma signal, default is 0 (no)");
MODULE_PARM_DESC(agc_crush, "enables the luminance AGC crush, default is 1 (yes)");
@@ -3506,8 +3505,7 @@ static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
(unsigned long)rc);
if (0 == (btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC)) {
- pr_notice("%d: Oh, there (temporarily?) is no input signal. "
- "Ok, then this is harmless, don't worry ;)\n",
+ pr_notice("%d: Oh, there (temporarily?) is no input signal. Ok, then this is harmless, don't worry ;)\n",
btv->c.nr);
return;
}
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index d43911deb617..274fd036b306 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -44,15 +44,13 @@ static int i2c_scan;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "configure i2c debug level");
module_param(i2c_hw, int, 0444);
-MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, "
- "instead of software bitbang");
+MODULE_PARM_DESC(i2c_hw, "force use of hardware i2c support, instead of software bitbang");
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
static unsigned int i2c_udelay = 5;
module_param(i2c_udelay, int, 0444);
-MODULE_PARM_DESC(i2c_udelay,"soft i2c delay at insmod time, in usecs "
- "(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay, "soft i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
/* ----------------------------------------------------------------------- */
/* I2C functions - bitbanging adapter (software i2c) */
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index a75c53da224a..4da720e4867e 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -185,8 +185,8 @@ static u32 bttv_rc5_decode(unsigned int code)
return 0;
}
}
- dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
- "instr=%x\n", rc5, org_code, RC5_START(rc5),
+ dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, instr=%x\n",
+ rc5, org_code, RC5_START(rc5),
RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
return rc5;
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 35bc9b2287b4..7166d2279465 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,9 +32,9 @@
#include "dst_priv.h"
#include "dst_common.h"
-static unsigned int verbose = 1;
+static unsigned int verbose;
module_param(verbose, int, 0644);
-MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
+MODULE_PARM_DESC(verbose, "verbosity level (0 to 3)");
static unsigned int dst_addons;
module_param(dst_addons, int, 0644);
@@ -46,29 +48,10 @@ MODULE_PARM_DESC(dst_algo, "tuning algo: default is 0=(SW), 1=(HW)");
#define ATTEMPT_TUNE 2
#define HAS_POWER 4
-#define DST_ERROR 0
-#define DST_NOTICE 1
-#define DST_INFO 2
-#define DST_DEBUG 3
-
-#define dprintk(x, y, z, format, arg...) do { \
- if (z) { \
- if ((x > DST_ERROR) && (x > y)) \
- printk(KERN_ERR "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_NOTICE) && (x > y)) \
- printk(KERN_NOTICE "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_INFO) && (x > y)) \
- printk(KERN_INFO "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- else if ((x > DST_DEBUG) && (x > y)) \
- printk(KERN_DEBUG "dst(%d) %s: " format "\n", \
- state->bt->nr, __func__ , ##arg); \
- } else { \
- if (x > y) \
- printk(format, ##arg); \
- } \
+#define dprintk(level, fmt, arg...) do { \
+ if (level >= verbose) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while(0)
static int dst_command(struct dst_state *state, u8 *data, u8 len);
@@ -91,9 +74,11 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
enb.enb.mask = mask;
enb.enb.enable = enbb;
- dprintk(verbose, DST_INFO, 1, "mask=[%04x], enbb=[%04x], outhigh=[%04x]", mask, enbb, outhigh);
+ dprintk(2, "mask=[%04x], enbb=[%04x], outhigh=[%04x]\n",
+ mask, enbb, outhigh);
if ((err = bt878_device_control(state->bt, DST_IG_ENABLE, &enb)) < 0) {
- dprintk(verbose, DST_INFO, 1, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)", err, mask, enbb);
+ dprintk(2, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)\n",
+ err, mask, enbb);
return -EREMOTEIO;
}
udelay(1000);
@@ -105,7 +90,8 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
bits.outp.mask = enbb;
bits.outp.highvals = outhigh;
if ((err = bt878_device_control(state->bt, DST_IG_WRITE, &bits)) < 0) {
- dprintk(verbose, DST_INFO, 1, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)", err, enbb, outhigh);
+ dprintk(2, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)\n",
+ err, enbb, outhigh);
return -EREMOTEIO;
}
@@ -119,7 +105,7 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
*result = 0;
if ((err = bt878_device_control(state->bt, DST_IG_READ, &rd_packet)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb error (err == %i)", err);
+ pr_err("dst_gpio_inb error (err == %i)\n", err);
return -EREMOTEIO;
}
*result = (u8) rd_packet.rd.value;
@@ -129,14 +115,14 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
int rdc_reset_state(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Resetting state machine");
+ dprintk(2, "Resetting state machine\n");
if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
msleep(10);
if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, RDC_8820_INT, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
msleep(10);
return -1;
}
@@ -147,14 +133,14 @@ EXPORT_SYMBOL(rdc_reset_state);
static int rdc_8820_reset(struct dst_state *state)
{
- dprintk(verbose, DST_DEBUG, 1, "Resetting DST");
+ dprintk(3, "Resetting DST\n");
if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
udelay(1000);
if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, RDC_8820_RESET, DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
@@ -164,7 +150,7 @@ static int rdc_8820_reset(struct dst_state *state)
static int dst_pio_enable(struct dst_state *state)
{
if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_ENABLE, 0, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
udelay(1000);
@@ -175,7 +161,7 @@ static int dst_pio_enable(struct dst_state *state)
int dst_pio_disable(struct dst_state *state)
{
if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_DISABLE, RDC_8820_PIO_0_DISABLE, NO_DELAY) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+ pr_err("dst_gpio_outb ERROR !\n");
return -1;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -192,16 +178,16 @@ int dst_wait_dst_ready(struct dst_state *state, u8 delay_mode)
for (i = 0; i < 200; i++) {
if (dst_gpio_inb(state, &reply) < 0) {
- dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb ERROR !");
+ pr_err("dst_gpio_inb ERROR !\n");
return -1;
}
if ((reply & RDC_8820_PIO_0_ENABLE) == 0) {
- dprintk(verbose, DST_INFO, 1, "dst wait ready after %d", i);
+ dprintk(2, "dst wait ready after %d\n", i);
return 1;
}
msleep(10);
}
- dprintk(verbose, DST_NOTICE, 1, "dst wait NOT ready after %d", i);
+ dprintk(1, "dst wait NOT ready after %d\n", i);
return 0;
}
@@ -209,7 +195,7 @@ EXPORT_SYMBOL(dst_wait_dst_ready);
int dst_error_recovery(struct dst_state *state)
{
- dprintk(verbose, DST_NOTICE, 1, "Trying to return from previous errors.");
+ dprintk(1, "Trying to return from previous errors.\n");
dst_pio_disable(state);
msleep(10);
dst_pio_enable(state);
@@ -221,7 +207,7 @@ EXPORT_SYMBOL(dst_error_recovery);
int dst_error_bailout(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Trying to bailout from previous error.");
+ dprintk(2, "Trying to bailout from previous error.\n");
rdc_8820_reset(state);
dst_pio_disable(state);
msleep(10);
@@ -232,13 +218,13 @@ EXPORT_SYMBOL(dst_error_bailout);
int dst_comm_init(struct dst_state *state)
{
- dprintk(verbose, DST_INFO, 1, "Initializing DST.");
+ dprintk(2, "Initializing DST.\n");
if ((dst_pio_enable(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "PIO Enable Failed");
+ pr_err("PIO Enable Failed\n");
return -1;
}
if ((rdc_reset_state(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "RDC 8820 State RESET Failed.");
+ pr_err("RDC 8820 State RESET Failed.\n");
return -1;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -260,23 +246,21 @@ int write_dst(struct dst_state *state, u8 *data, u8 len)
};
int err;
- u8 cnt, i;
+ u8 cnt;
- dprintk(verbose, DST_NOTICE, 0, "writing [ ");
- for (i = 0; i < len; i++)
- dprintk(verbose, DST_NOTICE, 0, "%02x ", data[i]);
- dprintk(verbose, DST_NOTICE, 0, "]\n");
+ dprintk(1, "writing [ %*ph ]\n", len, data);
for (cnt = 0; cnt < 2; cnt++) {
if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
- dprintk(verbose, DST_INFO, 1, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, data[0]);
+ dprintk(2, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+ err, len, data[0]);
dst_error_recovery(state);
continue;
} else
break;
}
if (cnt >= 2) {
- dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+ dprintk(2, "RDC 8820 RESET\n");
dst_error_bailout(state);
return -1;
@@ -300,23 +284,20 @@ int read_dst(struct dst_state *state, u8 *ret, u8 len)
for (cnt = 0; cnt < 2; cnt++) {
if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
- dprintk(verbose, DST_INFO, 1, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, ret[0]);
+ dprintk(2, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+ err, len, ret[0]);
dst_error_recovery(state);
continue;
} else
break;
}
if (cnt >= 2) {
- dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+ dprintk(2, "RDC 8820 RESET\n");
dst_error_bailout(state);
return -1;
}
- dprintk(verbose, DST_DEBUG, 1, "reply is 0x%x", ret[0]);
- for (err = 1; err < len; err++)
- dprintk(verbose, DST_DEBUG, 0, " 0x%x", ret[err]);
- if (err > 1)
- dprintk(verbose, DST_DEBUG, 0, "\n");
+ dprintk(3, "reply is %*ph\n", len, ret);
return 0;
}
@@ -326,11 +307,11 @@ static int dst_set_polarization(struct dst_state *state)
{
switch (state->voltage) {
case SEC_VOLTAGE_13: /* Vertical */
- dprintk(verbose, DST_INFO, 1, "Polarization=[Vertical]");
+ dprintk(2, "Polarization=[Vertical]\n");
state->tx_tuna[8] &= ~0x40;
break;
case SEC_VOLTAGE_18: /* Horizontal */
- dprintk(verbose, DST_INFO, 1, "Polarization=[Horizontal]");
+ dprintk(2, "Polarization=[Horizontal]\n");
state->tx_tuna[8] |= 0x40;
break;
case SEC_VOLTAGE_OFF:
@@ -343,7 +324,7 @@ static int dst_set_polarization(struct dst_state *state)
static int dst_set_freq(struct dst_state *state, u32 freq)
{
state->frequency = freq;
- dprintk(verbose, DST_INFO, 1, "set Frequency %u", freq);
+ dprintk(2, "set Frequency %u\n", freq);
if (state->dst_type == DST_TYPE_IS_SAT) {
freq = freq / 1000;
@@ -463,7 +444,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
if (state->dst_type == DST_TYPE_IS_TERR) {
return -EOPNOTSUPP;
}
- dprintk(verbose, DST_INFO, 1, "set symrate %u", srate);
+ dprintk(2, "set symrate %u\n", srate);
srate /= 1000;
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_SYMDIV) {
@@ -471,7 +452,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
sval <<= 20;
do_div(sval, 88000);
symcalc = (u32) sval;
- dprintk(verbose, DST_INFO, 1, "set symcalc %u", symcalc);
+ dprintk(2, "set symcalc %u\n", symcalc);
state->tx_tuna[5] = (u8) (symcalc >> 12);
state->tx_tuna[6] = (u8) (symcalc >> 4);
state->tx_tuna[7] = (u8) (symcalc << 4);
@@ -486,7 +467,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
state->tx_tuna[8] |= 0x20;
}
} else if (state->dst_type == DST_TYPE_IS_CABLE) {
- dprintk(verbose, DST_DEBUG, 1, "%s", state->fw_name);
+ dprintk(3, "%s\n", state->fw_name);
if (!strncmp(state->fw_name, "DCTNEW", 6)) {
state->tx_tuna[5] = (u8) (srate >> 8);
state->tx_tuna[6] = (u8) srate;
@@ -561,24 +542,24 @@ static void dst_type_flags_print(struct dst_state *state)
{
u32 type_flags = state->type_flags;
- dprintk(verbose, DST_ERROR, 0, "DST type flags :");
+ pr_err("DST type flags :\n");
if (type_flags & DST_TYPE_HAS_TS188)
- dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner", DST_TYPE_HAS_TS188);
+ pr_err(" 0x%x newtuner\n", DST_TYPE_HAS_TS188);
if (type_flags & DST_TYPE_HAS_NEWTUNE_2)
- dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner 2", DST_TYPE_HAS_NEWTUNE_2);
+ pr_err(" 0x%x newtuner 2\n", DST_TYPE_HAS_NEWTUNE_2);
if (type_flags & DST_TYPE_HAS_TS204)
- dprintk(verbose, DST_ERROR, 0, " 0x%x ts204", DST_TYPE_HAS_TS204);
+ pr_err(" 0x%x ts204\n", DST_TYPE_HAS_TS204);
if (type_flags & DST_TYPE_HAS_VLF)
- dprintk(verbose, DST_ERROR, 0, " 0x%x VLF", DST_TYPE_HAS_VLF);
+ pr_err(" 0x%x VLF\n", DST_TYPE_HAS_VLF);
if (type_flags & DST_TYPE_HAS_SYMDIV)
- dprintk(verbose, DST_ERROR, 0, " 0x%x symdiv", DST_TYPE_HAS_SYMDIV);
+ pr_err(" 0x%x symdiv\n", DST_TYPE_HAS_SYMDIV);
if (type_flags & DST_TYPE_HAS_FW_1)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 1", DST_TYPE_HAS_FW_1);
+ pr_err(" 0x%x firmware version = 1\n", DST_TYPE_HAS_FW_1);
if (type_flags & DST_TYPE_HAS_FW_2)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 2", DST_TYPE_HAS_FW_2);
+ pr_err(" 0x%x firmware version = 2\n", DST_TYPE_HAS_FW_2);
if (type_flags & DST_TYPE_HAS_FW_3)
- dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 3", DST_TYPE_HAS_FW_3);
- dprintk(verbose, DST_ERROR, 0, "\n");
+ pr_err(" 0x%x firmware version = 3\n", DST_TYPE_HAS_FW_3);
+ pr_err("\n");
}
@@ -603,10 +584,10 @@ static int dst_type_print(struct dst_state *state, u8 type)
break;
default:
- dprintk(verbose, DST_INFO, 1, "invalid dst type %d", type);
+ dprintk(2, "invalid dst type %d\n", type);
return -EINVAL;
}
- dprintk(verbose, DST_INFO, 1, "DST type: %s", otype);
+ dprintk(2, "DST type: %s\n", otype);
return 0;
}
@@ -914,12 +895,12 @@ static int dst_get_mac(struct dst_state *state)
u8 get_mac[] = { 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_mac[7] = dst_check_sum(get_mac, 7);
if (dst_command(state, get_mac, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->mac_address, '\0', 8);
memcpy(&state->mac_address, &state->rxbuffer, 6);
- dprintk(verbose, DST_ERROR, 1, "MAC Address=[%pM]", state->mac_address);
+ pr_err("MAC Address=[%pM]\n", state->mac_address);
return 0;
}
@@ -929,11 +910,11 @@ static int dst_fw_ver(struct dst_state *state)
u8 get_ver[] = { 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_ver[7] = dst_check_sum(get_ver, 7);
if (dst_command(state, get_ver, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memcpy(&state->fw_version, &state->rxbuffer, 8);
- dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
+ pr_err("Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x\n",
state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
state->fw_version[1],
state->fw_version[5], state->fw_version[6],
@@ -950,17 +931,17 @@ static int dst_card_type(struct dst_state *state)
u8 get_type[] = { 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_type[7] = dst_check_sum(get_type, 7);
if (dst_command(state, get_type, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->card_info, '\0', 8);
memcpy(&state->card_info, &state->rxbuffer, 7);
- dprintk(verbose, DST_ERROR, 1, "Device Model=[%s]", &state->card_info[0]);
+ pr_err("Device Model=[%s]\n", &state->card_info[0]);
for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
if (!strcmp(&state->card_info[0], p_tuner_list->board_name)) {
state->tuner_type = p_tuner_list->tuner_type;
- dprintk(verbose, DST_ERROR, 1, "DST has [%s] tuner, tuner type=[%d]",
+ pr_err("DST has [%s] tuner, tuner type=[%d]\n",
p_tuner_list->tuner_name, p_tuner_list->tuner_type);
}
}
@@ -973,26 +954,19 @@ static int dst_get_vendor(struct dst_state *state)
u8 get_vendor[] = { 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
get_vendor[7] = dst_check_sum(get_vendor, 7);
if (dst_command(state, get_vendor, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+ dprintk(2, "Unsupported Command\n");
return -1;
}
memset(&state->vendor, '\0', 8);
memcpy(&state->vendor, &state->rxbuffer, 7);
- dprintk(verbose, DST_ERROR, 1, "Vendor=[%s]", &state->vendor[0]);
+ pr_err("Vendor=[%s]\n", &state->vendor[0]);
return 0;
}
static void debug_dst_buffer(struct dst_state *state)
{
- int i;
-
- if (verbose > 2) {
- printk("%s: [", __func__);
- for (i = 0; i < 8; i++)
- printk(" %02x", state->rxbuffer[i]);
- printk("]\n");
- }
+ dprintk(3, "%s: [ %*ph ]\n", __func__, 8, state->rxbuffer);
}
static int dst_check_stv0299(struct dst_state *state)
@@ -1001,13 +975,13 @@ static int dst_check_stv0299(struct dst_state *state)
check_stv0299[7] = dst_check_sum(check_stv0299, 7);
if (dst_command(state, check_stv0299, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Cmd=[0x04] failed");
+ pr_err("Cmd=[0x04] failed\n");
return -1;
}
debug_dst_buffer(state);
if (memcmp(&check_stv0299, &state->rxbuffer, 8)) {
- dprintk(verbose, DST_ERROR, 1, "Found a STV0299 NIM");
+ pr_err("Found a STV0299 NIM\n");
state->tuner_type = TUNER_TYPE_STV0299;
return 0;
}
@@ -1021,13 +995,13 @@ static int dst_check_mb86a15(struct dst_state *state)
check_mb86a15[7] = dst_check_sum(check_mb86a15, 7);
if (dst_command(state, check_mb86a15, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Cmd=[0x10], failed");
+ pr_err("Cmd=[0x10], failed\n");
return -1;
}
debug_dst_buffer(state);
if (memcmp(&check_mb86a15, &state->rxbuffer, 8) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Found a MB86A15 NIM");
+ pr_err("Found a MB86A15 NIM\n");
state->tuner_type = TUNER_TYPE_MB86A15;
return 0;
}
@@ -1042,21 +1016,21 @@ static int dst_get_tuner_info(struct dst_state *state)
get_tuner_1[7] = dst_check_sum(get_tuner_1, 7);
get_tuner_2[7] = dst_check_sum(get_tuner_2, 7);
- dprintk(verbose, DST_ERROR, 1, "DST TYpe = MULTI FE");
+ pr_err("DST TYpe = MULTI FE\n");
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
if (dst_command(state, get_tuner_1, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Cmd=[0x13], Unsupported");
+ dprintk(2, "Cmd=[0x13], Unsupported\n");
goto force;
}
} else {
if (dst_command(state, get_tuner_2, 8) < 0) {
- dprintk(verbose, DST_INFO, 1, "Cmd=[0xb], Unsupported");
+ dprintk(2, "Cmd=[0xb], Unsupported\n");
goto force;
}
}
memcpy(&state->board_info, &state->rxbuffer, 8);
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
- dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
+ pr_err("DST type has TS=188\n");
}
if (state->board_info[0] == 0xbc) {
if (state->dst_type != DST_TYPE_IS_ATSC)
@@ -1066,7 +1040,7 @@ static int dst_get_tuner_info(struct dst_state *state)
if (state->board_info[1] == 0x01) {
state->dst_hw_cap |= DST_TYPE_HAS_DBOARD;
- dprintk(verbose, DST_ERROR, 1, "DST has Daughterboard");
+ pr_err("DST has Daughterboard\n");
}
}
@@ -1074,7 +1048,7 @@ static int dst_get_tuner_info(struct dst_state *state)
force:
if (!strncmp(state->fw_name, "DCT-CI", 6)) {
state->type_flags |= DST_TYPE_HAS_TS204;
- dprintk(verbose, DST_ERROR, 1, "Forcing [%s] to TS188", state->fw_name);
+ pr_err("Forcing [%s] to TS188\n", state->fw_name);
}
return -1;
@@ -1103,7 +1077,7 @@ static int dst_get_device_id(struct dst_state *state)
if (read_dst(state, &reply, GET_ACK))
return -1; /* Read failure */
if (reply != ACK) {
- dprintk(verbose, DST_INFO, 1, "Write not Acknowledged! [Reply=0x%02x]", reply);
+ dprintk(2, "Write not Acknowledged! [Reply=0x%02x]\n", reply);
return -1; /* Unack'd write */
}
if (!dst_wait_dst_ready(state, DEVICE_INIT))
@@ -1113,7 +1087,7 @@ static int dst_get_device_id(struct dst_state *state)
dst_pio_disable(state);
if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
- dprintk(verbose, DST_INFO, 1, "Checksum failure!");
+ dprintk(2, "Checksum failure!\n");
return -1; /* Checksum failure */
}
state->rxbuffer[7] = '\0';
@@ -1125,7 +1099,7 @@ static int dst_get_device_id(struct dst_state *state)
/* Card capabilities */
state->dst_hw_cap = p_dst_type->dst_feature;
- dprintk(verbose, DST_ERROR, 1, "Recognise [%s]", p_dst_type->device_id);
+ pr_err("Recognise [%s]\n", p_dst_type->device_id);
strncpy(&state->fw_name[0], p_dst_type->device_id, 6);
/* Multiple tuners */
if (p_dst_type->tuner_type & TUNER_TYPE_MULTI) {
@@ -1133,7 +1107,7 @@ static int dst_get_device_id(struct dst_state *state)
case DST_TYPE_IS_SAT:
/* STV0299 check */
if (dst_check_stv0299(state) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Unsupported");
+ pr_err("Unsupported\n");
state->tuner_type = TUNER_TYPE_MB86A15;
}
break;
@@ -1141,7 +1115,7 @@ static int dst_get_device_id(struct dst_state *state)
break;
}
if (dst_check_mb86a15(state) < 0)
- dprintk(verbose, DST_ERROR, 1, "Unsupported");
+ pr_err("Unsupported\n");
/* Single tuner */
} else {
state->tuner_type = p_dst_type->tuner_type;
@@ -1149,7 +1123,7 @@ static int dst_get_device_id(struct dst_state *state)
for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
if (!(strncmp(p_dst_type->device_id, p_tuner_list->fw_name, 7)) &&
p_tuner_list->tuner_type == state->tuner_type) {
- dprintk(verbose, DST_ERROR, 1, "[%s] has a [%s]",
+ pr_err("[%s] has a [%s]\n",
p_dst_type->device_id, p_tuner_list->tuner_name);
}
}
@@ -1158,8 +1132,8 @@ static int dst_get_device_id(struct dst_state *state)
}
if (i >= ARRAY_SIZE(dst_tlist)) {
- dprintk(verbose, DST_ERROR, 1, "Unable to recognize %s or %s", &state->rxbuffer[0], &state->rxbuffer[1]);
- dprintk(verbose, DST_ERROR, 1, "please email linux-dvb@linuxtv.org with this type in");
+ pr_err("Unable to recognize %s or %s\n", &state->rxbuffer[0], &state->rxbuffer[1]);
+ pr_err("please email linux-dvb@linuxtv.org with this type in");
use_dst_type = DST_TYPE_IS_SAT;
use_type_flags = DST_TYPE_HAS_SYMDIV;
}
@@ -1176,7 +1150,7 @@ static int dst_probe(struct dst_state *state)
mutex_init(&state->dst_mutex);
if (dst_addons & DST_TYPE_HAS_CA) {
if ((rdc_8820_reset(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "RDC 8820 RESET Failed.");
+ pr_err("RDC 8820 RESET Failed.\n");
return -1;
}
msleep(4000);
@@ -1184,35 +1158,35 @@ static int dst_probe(struct dst_state *state)
msleep(100);
}
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "DST Initialization Failed.");
+ pr_err("DST Initialization Failed.\n");
return -1;
}
msleep(100);
if (dst_get_device_id(state) < 0) {
- dprintk(verbose, DST_ERROR, 1, "unknown device.");
+ pr_err("unknown device.\n");
return -1;
}
if (dst_get_mac(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "MAC: Unsupported command");
+ dprintk(2, "MAC: Unsupported command\n");
}
if ((state->type_flags & DST_TYPE_HAS_MULTI_FE) || (state->type_flags & DST_TYPE_HAS_FW_BUILD)) {
if (dst_get_tuner_info(state) < 0)
- dprintk(verbose, DST_INFO, 1, "Tuner: Unsupported command");
+ dprintk(2, "Tuner: Unsupported command\n");
}
if (state->type_flags & DST_TYPE_HAS_TS204) {
dst_packsize(state, 204);
}
if (state->type_flags & DST_TYPE_HAS_FW_BUILD) {
if (dst_fw_ver(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "FW: Unsupported command");
+ dprintk(2, "FW: Unsupported command\n");
return 0;
}
if (dst_card_type(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "Card: Unsupported command");
+ dprintk(2, "Card: Unsupported command\n");
return 0;
}
if (dst_get_vendor(state) < 0) {
- dprintk(verbose, DST_INFO, 1, "Vendor: Unsupported command");
+ dprintk(2, "Vendor: Unsupported command\n");
return 0;
}
}
@@ -1226,33 +1200,33 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
mutex_lock(&state->dst_mutex);
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_NOTICE, 1, "DST Communication Initialization Failed.");
+ dprintk(1, "DST Communication Initialization Failed.\n");
goto error;
}
if (write_dst(state, data, len)) {
- dprintk(verbose, DST_INFO, 1, "Trying to recover.. ");
+ dprintk(2, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "Recovery Failed.");
+ pr_err("Recovery Failed.\n");
goto error;
}
goto error;
}
if ((dst_pio_disable(state)) < 0) {
- dprintk(verbose, DST_ERROR, 1, "PIO Disable Failed.");
+ pr_err("PIO Disable Failed.\n");
goto error;
}
if (state->type_flags & DST_TYPE_HAS_FW_1)
mdelay(3);
if (read_dst(state, &reply, GET_ACK)) {
- dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+ dprintk(3, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_INFO, 1, "Recovery Failed.");
+ dprintk(2, "Recovery Failed.\n");
goto error;
}
goto error;
}
if (reply != ACK) {
- dprintk(verbose, DST_INFO, 1, "write not acknowledged 0x%02x ", reply);
+ dprintk(2, "write not acknowledged 0x%02x\n", reply);
goto error;
}
if (len >= 2 && data[0] == 0 && (data[1] == 1 || data[1] == 3))
@@ -1264,15 +1238,15 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
if (!dst_wait_dst_ready(state, NO_DELAY))
goto error;
if (read_dst(state, state->rxbuffer, FIXED_COMM)) {
- dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+ dprintk(3, "Trying to recover..\n");
if ((dst_error_recovery(state)) < 0) {
- dprintk(verbose, DST_INFO, 1, "Recovery failed.");
+ dprintk(2, "Recovery failed.\n");
goto error;
}
goto error;
}
if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure");
+ dprintk(2, "checksum failure\n");
goto error;
}
mutex_unlock(&state->dst_mutex);
@@ -1348,19 +1322,19 @@ static int dst_get_tuna(struct dst_state *state)
else
retval = read_dst(state, &state->rx_tuna[2], FIXED_COMM);
if (retval < 0) {
- dprintk(verbose, DST_DEBUG, 1, "read not successful");
+ dprintk(3, "read not successful\n");
return retval;
}
if ((state->type_flags & DST_TYPE_HAS_VLF) &&
!(state->dst_type == DST_TYPE_IS_ATSC)) {
if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[0], 9)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure ? ");
+ dprintk(2, "checksum failure ?\n");
return -EIO;
}
} else {
if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[2], 7)) {
- dprintk(verbose, DST_INFO, 1, "checksum failure? ");
+ dprintk(2, "checksum failure?\n");
return -EIO;
}
}
@@ -1387,7 +1361,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
int retval;
u8 reply;
- dprintk(verbose, DST_INFO, 1, "type_flags 0x%x ", state->type_flags);
+ dprintk(2, "type_flags 0x%x\n", state->type_flags);
state->decode_freq = 0;
state->decode_lock = state->decode_strength = state->decode_snr = 0;
if (state->dst_type == DST_TYPE_IS_SAT) {
@@ -1397,7 +1371,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
state->diseq_flags &= ~(HAS_LOCK | ATTEMPT_TUNE);
mutex_lock(&state->dst_mutex);
if ((dst_comm_init(state)) < 0) {
- dprintk(verbose, DST_DEBUG, 1, "DST Communication initialization failed.");
+ dprintk(3, "DST Communication initialization failed.\n");
goto error;
}
// if (state->type_flags & DST_TYPE_HAS_NEWTUNE) {
@@ -1412,19 +1386,19 @@ static int dst_write_tuna(struct dvb_frontend *fe)
}
if (retval < 0) {
dst_pio_disable(state);
- dprintk(verbose, DST_DEBUG, 1, "write not successful");
+ dprintk(3, "write not successful\n");
goto werr;
}
if ((dst_pio_disable(state)) < 0) {
- dprintk(verbose, DST_DEBUG, 1, "DST PIO disable failed !");
+ dprintk(3, "DST PIO disable failed !\n");
goto error;
}
if ((read_dst(state, &reply, GET_ACK) < 0)) {
- dprintk(verbose, DST_DEBUG, 1, "read verify not successful.");
+ dprintk(3, "read verify not successful.\n");
goto error;
}
if (reply != ACK) {
- dprintk(verbose, DST_DEBUG, 1, "write not acknowledged 0x%02x ", reply);
+ dprintk(3, "write not acknowledged 0x%02x\n", reply);
goto error;
}
state->diseq_flags |= ATTEMPT_TUNE;
@@ -1622,7 +1596,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
retval = dst_set_freq(state, p->frequency);
if(retval != 0)
return retval;
- dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+ dprintk(3, "Set Frequency=[%d]\n", p->frequency);
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1630,7 +1604,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
dst_set_fec(state, p->fec_inner);
dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+ dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1656,7 +1630,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
if (re_tune) {
dst_set_freq(state, p->frequency);
- dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+ dprintk(3, "Set Frequency=[%d]\n", p->frequency);
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1664,7 +1638,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
dst_set_fec(state, p->fec_inner);
dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+ dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1722,10 +1696,10 @@ static void bt8xx_dst_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops dst_dvbt_ops;
-static struct dvb_frontend_ops dst_dvbs_ops;
-static struct dvb_frontend_ops dst_dvbc_ops;
-static struct dvb_frontend_ops dst_atsc_ops;
+static const struct dvb_frontend_ops dst_dvbt_ops;
+static const struct dvb_frontend_ops dst_dvbs_ops;
+static const struct dvb_frontend_ops dst_dvbc_ops;
+static const struct dvb_frontend_ops dst_atsc_ops;
struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_adapter)
{
@@ -1750,7 +1724,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
memcpy(&state->frontend.ops, &dst_atsc_ops, sizeof(struct dvb_frontend_ops));
break;
default:
- dprintk(verbose, DST_ERROR, 1, "unknown DST type. please report to the LinuxTV.org DVB mailinglist.");
+ pr_err("unknown DST type. please report to the LinuxTV.org DVB mailinglist.\n");
kfree(state);
return NULL;
}
@@ -1761,7 +1735,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
EXPORT_SYMBOL(dst_attach);
-static struct dvb_frontend_ops dst_dvbt_ops = {
+static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DST DVB-T",
@@ -1790,7 +1764,7 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
.read_snr = dst_read_snr,
};
-static struct dvb_frontend_ops dst_dvbs_ops = {
+static const struct dvb_frontend_ops dst_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "DST DVB-S",
@@ -1819,7 +1793,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
.set_tone = dst_set_tone,
};
-static struct dvb_frontend_ops dst_dvbc_ops = {
+static const struct dvb_frontend_ops dst_dvbc_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "DST DVB-C",
@@ -1848,7 +1822,7 @@ static struct dvb_frontend_ops dst_dvbc_ops = {
.read_snr = dst_read_snr,
};
-static struct dvb_frontend_ops dst_atsc_ops = {
+static const struct dvb_frontend_ops dst_atsc_ops = {
.delsys = { SYS_ATSC },
.info = {
.name = "DST ATSC",
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index e69d338ab9be..6100fa71ece8 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -19,7 +19,7 @@
*
*/
-#define pr_fmt(fmt) "dvb_bt8xx: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/module.h>
@@ -44,10 +44,12 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk( args... ) \
- do { \
- if (debug) printk(KERN_DEBUG args); \
- } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
#define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
@@ -55,7 +57,7 @@ static void dvb_bt8xx_task(unsigned long data)
{
struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *)data;
- //printk("%d ", card->bt->finished_block);
+ dprintk("%d\n", card->bt->finished_block);
while (card->bt->last_block != card->bt->finished_block) {
(card->bt->TS_Size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
@@ -443,7 +445,7 @@ static void or51211_reset(struct dvb_frontend * fe)
/* reset & PRM1,2&4 are outputs */
int ret = bttv_gpio_enable(bt->bttv_nr, 0x001F, 0x001F);
if (ret != 0)
- printk(KERN_WARNING "or51211: Init Error - Can't Reset DVR (%i)\n", ret);
+ pr_warn("or51211: Init Error - Can't Reset DVR (%i)\n", ret);
bttv_write_gpio(bt->bttv_nr, 0x001F, 0x0000); /* Reset */
msleep(20);
/* Now set for normal operation */
@@ -560,7 +562,8 @@ static void digitv_alps_tded4_reset(struct dvb_bt8xx_card *bt)
int ret = bttv_gpio_enable(bt->bttv_nr, 0x08, 0x08);
if (ret != 0)
- printk(KERN_WARNING "digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n", ret);
+ pr_warn("digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n",
+ ret);
/* Pulse the reset line */
bttv_write_gpio(bt->bttv_nr, 0x08, 0x08); /* High */
@@ -620,7 +623,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
dvb_attach(simple_tuner_attach, card->fe,
card->i2c_adapter, 0x61,
TUNER_LG_TDVS_H06XF);
- dprintk ("dvb_bt8xx: lgdt330x detected\n");
+ dprintk("dvb_bt8xx: lgdt330x detected\n");
}
break;
@@ -635,7 +638,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe = dvb_attach(nxt6000_attach, &vp3021_alps_tded4_config, card->i2c_adapter);
if (card->fe != NULL) {
card->fe->ops.tuner_ops.set_params = vp3021_alps_tded4_tuner_set_params;
- dprintk ("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
+ dprintk("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
break;
}
@@ -645,7 +648,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = digitv_alps_tded4_tuner_calc_regs;
- dprintk ("dvb_bt8xx: an mt352 was detected on your digitv card\n");
+ dprintk("dvb_bt8xx: an mt352 was detected on your digitv card\n");
}
break;
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 5c76637900d0..def4a3b37084 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -527,7 +527,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
cvi_ctrl = ioread32(&cvi->control);
cvi_stat = ioread32(&cvi->status);
vmr_ctrl = ioread32(&vmr->control);
- vmr_stat = ioread32(&vmr->control);
+ vmr_stat = ioread32(&vmr->status);
cobalt_info("rx%d: cvi resolution: %dx%d\n", rx,
ioread32(&cvi->frame_width), ioread32(&cvi->frame_height));
cobalt_info("rx%d: cvi control: %s%s%s\n", rx,
@@ -1084,12 +1084,33 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
+static int cobalt_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+{
+ struct cobalt_stream *s = video_drvdata(file);
+ struct v4l2_dv_timings timings;
+ int err = 0;
+
+ if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (s->input == 1)
+ timings = cea1080p60;
+ else
+ err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+ if (!err) {
+ cc->bounds.width = cc->defrect.width = timings.bt.width;
+ cc->bounds.height = cc->defrect.height = timings.bt.height;
+ cc->pixelaspect = v4l2_dv_timings_aspect_ratio(&timings);
+ }
+ return err;
+}
+
static const struct v4l2_ioctl_ops cobalt_ioctl_ops = {
.vidioc_querycap = cobalt_querycap,
.vidioc_g_parm = cobalt_g_parm,
.vidioc_log_status = cobalt_log_status,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_cropcap = cobalt_cropcap,
.vidioc_enum_input = cobalt_enum_input,
.vidioc_g_input = cobalt_g_input,
.vidioc_s_input = cobalt_s_input,
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 0b0e8015ad34..9fb7f5978c8b 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -217,8 +217,8 @@ static int cx18_alsa_load(struct cx18 *cx)
s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
if (s->video_dev.v4l2_dev == NULL) {
- CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
- "skipping\n", __func__);
+ CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+ __func__);
return 0;
}
@@ -232,8 +232,8 @@ static int cx18_alsa_load(struct cx18 *cx)
CX18_ALSA_ERR("%s: failed to create struct snd_cx18_card\n",
__func__);
} else {
- CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance "
- "\n", __func__);
+ CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance\n",
+ __func__);
}
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 30bbe8d1ea55..7f7306fd9a7f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -468,21 +468,19 @@ void cx18_av_std_setup(struct cx18 *cx)
CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
pll / 8000000, (pll / 8) % 1000000);
- CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
- "= %d.%03d\n", src_decimation / 256,
+ CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio = %d.%03d\n",
+ src_decimation / 256,
((src_decimation % 256) * 1000) / 256);
tmp = 28636360 * (u64) sc;
do_div(tmp, src_decimation);
fsc = tmp >> 13;
CX18_DEBUG_INFO_DEV(sd,
- "Chroma sub-carrier initial freq = %d.%06d "
- "MHz\n", fsc / 1000000, fsc % 1000000);
+ "Chroma sub-carrier initial freq = %d.%06d MHz\n",
+ fsc / 1000000, fsc % 1000000);
- CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, "
- "vactive %i, vblank656 %i, src_dec %i, "
- "burst 0x%02x, luma_lpf %i, uv_lpf %i, "
- "comb 0x%02x, sc 0x%06x\n",
+ CX18_DEBUG_INFO_DEV(sd,
+ "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
hblank, hactive, vblank, vactive, vblank656,
src_decimation, burst, luma_lpf, uv_lpf,
comb, sc);
@@ -1069,8 +1067,7 @@ static void log_video_status(struct cx18 *cx)
CX18_INFO_DEV(sd, "Specified video input: Composite %d\n",
vid_input - CX18_AV_COMPOSITE1 + 1);
} else {
- CX18_INFO_DEV(sd, "Specified video input: "
- "S-Video (Luma In%d, Chroma In%d)\n",
+ CX18_INFO_DEV(sd, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
(vid_input & 0xf0) >> 4,
(vid_input & 0xf00) >> 8);
}
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index a34fd082b76e..160e2e53383f 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -61,8 +61,7 @@ static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
if (expected != dl_control) {
- CX18_ERR_DEV(sd, "verification of %s firmware load "
- "failed: expected %#010x got %#010x\n",
+ CX18_ERR_DEV(sd, "verification of %s firmware load failed: expected %#010x got %#010x\n",
FWFILE, expected, dl_control);
ret = -EIO;
break;
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index adb5a8c72c06..812a2507945a 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -44,8 +44,7 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
type == V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD)) {
/* Only IVTV fmt VBI insertion & only MPEG-2 PS type streams */
cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE;
- CX18_DEBUG_INFO("disabled insertion of sliced VBI data into "
- "the MPEG stream\n");
+ CX18_DEBUG_INFO("disabled insertion of sliced VBI data into the MPEG stream\n");
return 0;
}
@@ -63,16 +62,14 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
}
cx->vbi.insert_mpeg =
V4L2_MPEG_STREAM_VBI_FMT_NONE;
- CX18_WARN("Unable to allocate buffers for "
- "sliced VBI data insertion\n");
+ CX18_WARN("Unable to allocate buffers for sliced VBI data insertion\n");
return -ENOMEM;
}
}
}
cx->vbi.insert_mpeg = fmt;
- CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,"
- "when sliced VBI is enabled\n");
+ CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,when sliced VBI is enabled\n");
/*
* If our current settings have no lines set for capture, store a valid,
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 2f23b26b16c0..b8eedbe51c8f 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -405,8 +405,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
CX18_ERR("Invalid EEPROM\n");
return;
default:
- CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
- "(cardtype=1)\n", tv.model);
+ CX18_ERR("Unknown model %d, defaulting to original HVR-1600 (cardtype=1)\n",
+ tv.model);
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
}
@@ -635,8 +635,8 @@ static void cx18_process_options(struct cx18 *cx)
/* convert from kB to bytes */
cx->stream_buf_size[i] *= 1024;
}
- CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, "
- "%d bytes\n", i, cx->options.megabytes[i],
+ CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, %d bytes\n",
+ i, cx->options.megabytes[i],
cx->stream_buffers[i], cx->stream_buf_size[i]);
}
@@ -838,14 +838,13 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 64 && cx18_pci_latency) {
- CX18_INFO("Unreasonably low latency timer, "
- "setting to 64 (was %d)\n", pci_latency);
+ CX18_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+ pci_latency);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
}
- CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%llx\n",
+ CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
@@ -910,8 +909,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* FIXME - module parameter arrays constrain max instances */
i = atomic_inc_return(&cx18_instance) - 1;
if (i >= CX18_MAX_CARDS) {
- printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
- "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
+ printk(KERN_ERR "cx18: cannot manage card %d, driver has a limit of 0 - %d\n",
+ i, CX18_MAX_CARDS - 1);
return -ENOMEM;
}
@@ -926,8 +925,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
if (retval) {
- printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
- "\n", cx->instance);
+ printk(KERN_ERR "cx18: v4l2_device_register of card %d failed\n",
+ cx->instance);
kfree(cx);
return retval;
}
@@ -958,13 +957,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
- CX18_ERR("ioremap failed. Can't get a window into CX23418 "
- "memory and register space\n");
- CX18_ERR("Each capture card with a CX23418 needs 64 MB of "
- "vmalloc address space for the window\n");
+ CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
+ CX18_ERR("Each capture card with a CX23418 needs 64 MB of vmalloc address space for the window\n");
CX18_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- CX18_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ CX18_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1000,8 +996,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* Initialize GPIO Reset Controller to do chip resets during i2c init */
if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
- CX18_WARN("Could not register GPIO reset controller"
- "subdevice; proceeding anyway.\n");
+ CX18_WARN("Could not register GPIO reset controllersubdevice; proceeding anyway.\n");
else
cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
}
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 3eac59c51231..03d0478170a7 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -155,10 +155,8 @@ static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
}
if (ret) {
- CX18_ERR("The MPC718 board variant with the MT352 DVB-T"
- "demodualtor will not work without it\n");
- CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
- "mpc718' if you need the firmware\n");
+ CX18_ERR("The MPC718 board variant with the MT352 DVB-Tdemodualtor will not work without it\n");
+ CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware mpc718' if you need the firmware\n");
}
return ret;
}
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index df837408efd5..78b399b8613e 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -49,8 +49,7 @@ int cx18_claim_stream(struct cx18_open_id *id, int type)
/* Nothing should ever try to directly claim the IDX stream */
if (type == CX18_ENC_STREAM_TYPE_IDX) {
- CX18_WARN("MPEG Index stream cannot be claimed "
- "directly, but something tried.\n");
+ CX18_WARN("MPEG Index stream cannot be claimed directly, but something tried.\n");
return -EINVAL;
}
@@ -728,8 +727,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
/* Stop internal use associated VBI and IDX streams */
if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
!test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
- CX18_DEBUG_INFO("close stopping embedded VBI "
- "capture\n");
+ CX18_DEBUG_INFO("close stopping embedded VBI capture\n");
cx18_stop_v4l2_encode_stream(s_vbi, 0);
}
if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index fecca2a63891..0faeb979ceb9 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -951,8 +951,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
return 0;
h = cx18_find_handle(cx);
if (h == CX18_INVALID_TASK_HANDLE) {
- CX18_ERR("Can't find valid task handle for "
- "V4L2_ENC_CMD_PAUSE\n");
+ CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_PAUSE\n");
return -EBADFD;
}
cx18_mute(cx);
@@ -968,8 +967,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
return 0;
h = cx18_find_handle(cx);
if (h == CX18_INVALID_TASK_HANDLE) {
- CX18_ERR("Can't find valid task handle for "
- "V4L2_ENC_CMD_RESUME\n");
+ CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_RESUME\n");
return -EBADFD;
}
cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, h);
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index 80edfe93a3d8..361426485e98 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -59,8 +59,8 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2);
if (sw1 || sw2 || hw2)
- CX18_DEBUG_HI_IRQ("received interrupts "
- "SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
+ CX18_DEBUG_HI_IRQ("received interrupts SW1: %x SW2: %x HW2: %x\n",
+ sw1, sw2, hw2);
/*
* SW1 responses have to happen first. The sending XPU times out the
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index 1f8aa9a749a1..d3cf3588879f 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -123,8 +123,8 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
if (!(cx18_debug & CX18_DBGFLG_API))
return;
- CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
- "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
+ CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
+ name, mb->request, mb->ack, mb->cmd, mb->error,
u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}
@@ -255,8 +255,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
s = cx18_handle_to_stream(cx, handle);
if (s == NULL) {
- CX18_WARN("Got DMA done notification for unknown/inactive"
- " handle %d, %s mailbox seq no %d\n", handle,
+ CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
+ handle,
(order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
"stale" : "good", mb->request);
return;
@@ -290,9 +290,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
!(id >= s->mdl_base_idx &&
id < (s->mdl_base_idx + s->buffers))) {
- CX18_WARN("Fell behind! Ignoring stale mailbox with "
- " inconsistent data. Lost MDL for mailbox "
- "seq no %d\n", mb->request);
+ CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
+ mb->request);
break;
}
mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);
@@ -418,9 +417,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
/* Don't ack if the RPU has gotten impatient and timed us out */
if (req != cx18_readl(cx, &ack_mb->request) ||
req == cx18_readl(cx, &ack_mb->ack)) {
- CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
- "incoming %s to EPU mailbox (sequence no. %u) "
- "while processing\n",
+ CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
rpu_str[order->rpu], rpu_str[order->rpu], req);
order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
return;
@@ -555,8 +552,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
order = alloc_in_work_order_irq(cx);
if (order == NULL) {
- CX18_WARN("Unable to find blank work order form to schedule "
- "incoming mailbox command processing\n");
+ CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
return;
}
@@ -573,9 +569,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
- CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
- "incoming %s to EPU mailbox (sequence no. %u)"
- "\n",
+ CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
rpu_str[rpu], rpu_str[rpu], order_mb->request);
if (cx18_debug & CX18_DBGFLG_WARN)
dump_mb(cx, order_mb, "incoming");
@@ -663,8 +657,8 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
if (req != ack) {
/* waited long enough, make the mbox "not busy" from our end */
cx18_writel(cx, req, &mb->ack);
- CX18_ERR("mbox was found stuck busy when setting up for %s; "
- "clearing busy and trying to proceed\n", info->name);
+ CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
+ info->name);
} else if (ret != timeout)
CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
jiffies_to_msecs(timeout-ret));
@@ -707,14 +701,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
mutex_unlock(mb_lock);
if (ret >= timeout) {
/* Timed out */
- CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
- "for RPU acknowledgement\n",
+ CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
info->name, jiffies_to_msecs(ret));
} else {
- CX18_DEBUG_WARN("woken up before mailbox ack was ready "
- "after submitting %s to RPU. only "
- "waited %d msecs on req %u but awakened"
- " with unmatched ack %u\n",
+ CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU. only waited %d msecs on req %u but awakened with unmatched ack %u\n",
info->name,
jiffies_to_msecs(ret),
req, ack);
@@ -723,8 +713,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
}
if (ret >= timeout)
- CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
- "sending %s; timed out waiting %d msecs\n",
+ CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
info->name, jiffies_to_msecs(ret));
else
CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2a247d264b87..13e96d6055eb 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -164,9 +164,8 @@ struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
mdl->skipped++;
if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
/* mdl must have fallen out of rotation */
- CX18_WARN("Skipped %s, MDL %d, %d "
- "times - it must have dropped out of "
- "rotation\n", s->name, mdl->id,
+ CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
+ s->name, mdl->id,
mdl->skipped);
/* Sweep it up to put it back into rotation */
list_move_tail(&mdl->list, &sweep_up);
@@ -352,8 +351,7 @@ int cx18_stream_alloc(struct cx18_stream *s)
if (s->buffers == 0)
return 0;
- CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "
- "(%d.%02d kB total)\n",
+ CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
s->name, s->buffers, s->buf_size,
s->buffers * s->buf_size / 1024,
(s->buffers * s->buf_size * 100 / 1024) % 100);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index f3802ec1b383..7f699f0ee76c 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -353,8 +353,8 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
if (cx->card->hw_all & CX18_HW_DVB) {
s->dvb = kzalloc(sizeof(struct cx18_dvb), GFP_KERNEL);
if (s->dvb == NULL) {
- CX18_ERR("Couldn't allocate cx18_dvb structure"
- " for %s\n", s->name);
+ CX18_ERR("Couldn't allocate cx18_dvb structure for %s\n",
+ s->name);
return -ENOMEM;
}
} else {
@@ -462,8 +462,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
case VFL_TYPE_VBI:
if (cx->stream_buffers[type])
- CX18_INFO("Registered device %s for %s "
- "(%d x %d bytes)\n",
+ CX18_INFO("Registered device %s for %s (%d x %d bytes)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type]);
else
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aaf4e46ff3e9..5c94e312cba3 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -48,6 +48,9 @@
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <dvb_demux.h>
#include <dvb_frontend.h>
#include "altera-ci.h"
@@ -84,16 +87,18 @@ MODULE_DESCRIPTION("altera FPGA CI module");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
MODULE_LICENSE("GPL");
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
do { \
if (ci_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
-#define pid_dbg_print(args...) \
+#define pid_dbg_print(fmt, args...) \
do { \
if (pid_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
struct altera_ci_state;
@@ -718,7 +723,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
if (temp_int != NULL) {
inter = temp_int->internal;
(inter->cis_used)++;
- inter->fpga_rw = config->fpga_rw;
+ inter->fpga_rw = config->fpga_rw;
ci_dbg_print("%s: Find Internal Structure!\n", __func__);
} else {
inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 57a40c84b46e..ababd80fee93 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -48,24 +48,24 @@ extern int altera_ci_tuner_reset(void *dev, int ci_nr);
static inline int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline void altera_ci_release(void *dev, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
}
static inline int altera_ci_irq(void *dev)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
@@ -74,19 +74,19 @@ static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
static inline int altera_hw_filt_init(struct altera_ci_config *config,
int hw_filt_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
static inline void altera_hw_filt_release(void *dev, int filt_nr)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
}
static inline int altera_pid_feed_control(void *dev, int filt_nr,
struct dvb_demux_feed *dvbdmxfeed, int onoff)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return 0;
}
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 631e4f24aea6..5e8e134d81c2 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -65,10 +65,11 @@ static unsigned int ci_irq_enable;
module_param(ci_irq_enable, int, 0644);
MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
do { \
if (ci_dbg) \
- printk(KERN_DEBUG args); \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##args); \
} while (0)
#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
@@ -135,8 +136,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
};
if (1 + len > sizeof(buffer)) {
- printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
+ pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n",
KBUILD_MODNAME, reg, len);
return -EINVAL;
}
@@ -365,11 +365,8 @@ static void netup_read_ci_status(struct work_struct *work)
if (ret != 0)
return;
- ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
- "Reg=[0x%02x], data=%02x, "
- "TS config = %02x\n", __func__,
- state->ci_i2c_addr, 0, buf[0],
- buf[0]);
+ ci_dbg_print("%s: Slot Status Addr=[0x%04x], Reg=[0x%02x], data=%02x, TS config = %02x\n",
+ __func__, state->ci_i2c_addr, 0, buf[0], buf[0]);
if (buf[0] & 1)
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index da892f3e3c29..2ff1d1e274be 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -20,6 +20,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -32,9 +35,6 @@
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/cx2341x.h>
-#include "cx23885.h"
-#include "cx23885-ioctl.h"
-
#define CX23885_FIRM_IMAGE_SIZE 376836
#define CX23885_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
@@ -55,8 +55,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
#define dprintk(level, fmt, arg...)\
do { if (v4l_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, \
- (dev) ? dev->name : "cx23885[?]", ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: 417:" fmt), \
+ __func__, ##arg); \
} while (0)
static struct cx23885_tvnorm cx23885_tvnorms[] = {
@@ -769,10 +769,8 @@ static int cx23885_mbox_func(void *priv,
without side effects */
mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
if (value != 0x12345678) {
- printk(KERN_ERR
- "Firmware and/or mailbox pointer not initialized "
- "or corrupted, signature = 0x%x, cmd = %s\n", value,
- cmd_to_str(command));
+ pr_err("Firmware and/or mailbox pointer not initialized or corrupted, signature = 0x%x, cmd = %s\n",
+ value, cmd_to_str(command));
return -1;
}
@@ -781,8 +779,8 @@ static int cx23885_mbox_func(void *priv,
*/
mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
if (flag) {
- printk(KERN_ERR "ERROR: Mailbox appears to be in use "
- "(%x), cmd = %s\n", flag, cmd_to_str(command));
+ pr_err("ERROR: Mailbox appears to be in use (%x), cmd = %s\n",
+ flag, cmd_to_str(command));
return -1;
}
@@ -811,7 +809,7 @@ static int cx23885_mbox_func(void *priv,
if (0 != (flag & 4))
break;
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR "ERROR: API Mailbox timeout\n");
+ pr_err("ERROR: API Mailbox timeout\n");
return -1;
}
udelay(10);
@@ -888,7 +886,7 @@ static int cx23885_find_mailbox(struct cx23885_dev *dev)
return i+1;
}
}
- printk(KERN_ERR "Mailbox signature values not found!\n");
+ pr_err("Mailbox signature values not found!\n");
return -1;
}
@@ -923,7 +921,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
IVTV_REG_APU, 0);
if (retval != 0) {
- printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ pr_err("%s: Error with mc417_register_write\n",
__func__);
return -1;
}
@@ -932,25 +930,21 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
&dev->pci->dev);
if (retval != 0) {
- printk(KERN_ERR
- "ERROR: Hotplug firmware request failed (%s).\n",
- CX23885_FIRM_IMAGE_NAME);
- printk(KERN_ERR "Please fix your hotplug setup, the board will "
- "not work without firmware loaded!\n");
+ pr_err("ERROR: Hotplug firmware request failed (%s).\n",
+ CX23885_FIRM_IMAGE_NAME);
+ pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
return -1;
}
if (firmware->size != CX23885_FIRM_IMAGE_SIZE) {
- printk(KERN_ERR "ERROR: Firmware size mismatch "
- "(have %zu, expected %d)\n",
- firmware->size, CX23885_FIRM_IMAGE_SIZE);
+ pr_err("ERROR: Firmware size mismatch (have %zu, expected %d)\n",
+ firmware->size, CX23885_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
if (0 != memcmp(firmware->data, magic, 8)) {
- printk(KERN_ERR
- "ERROR: Firmware magic mismatch, wrong file?\n");
+ pr_err("ERROR: Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -1;
}
@@ -962,7 +956,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
value = *dataptr;
checksum += ~value;
if (mc417_memory_write(dev, i, value) != 0) {
- printk(KERN_ERR "ERROR: Loading firmware failed!\n");
+ pr_err("ERROR: Loading firmware failed!\n");
release_firmware(firmware);
return -1;
}
@@ -973,15 +967,14 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
dprintk(1, "Verifying firmware ...\n");
for (i--; i >= 0; i--) {
if (mc417_memory_read(dev, i, &value) != 0) {
- printk(KERN_ERR "ERROR: Reading firmware failed!\n");
+ pr_err("ERROR: Reading firmware failed!\n");
release_firmware(firmware);
return -1;
}
checksum -= ~value;
}
if (checksum) {
- printk(KERN_ERR
- "ERROR: Firmware load failed (checksum mismatch).\n");
+ pr_err("ERROR: Firmware load failed (checksum mismatch).\n");
release_firmware(firmware);
return -1;
}
@@ -1006,7 +999,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
mc417_register_read(dev, 0x900C, &gpio_value);
if (retval < 0)
- printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ pr_err("%s: Error with mc417_register_write\n",
__func__);
return 0;
}
@@ -1058,27 +1051,25 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
dprintk(2, "%s() PING OK\n", __func__);
retval = cx23885_load_firmware(dev);
if (retval < 0) {
- printk(KERN_ERR "%s() f/w load failed\n", __func__);
+ pr_err("%s() f/w load failed\n", __func__);
return retval;
}
retval = cx23885_find_mailbox(dev);
if (retval < 0) {
- printk(KERN_ERR "%s() mailbox < 0, error\n",
+ pr_err("%s() mailbox < 0, error\n",
__func__);
return -1;
}
dev->cx23417_mailbox = retval;
retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
- printk(KERN_ERR
- "ERROR: cx23417 firmware ping failed!\n");
+ pr_err("ERROR: cx23417 firmware ping failed!\n");
return -1;
}
retval = cx23885_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
&version);
if (retval < 0) {
- printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
- "version failed!\n");
+ pr_err("ERROR: cx23417 firmware get encoder :version failed!\n");
return -1;
}
dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
@@ -1563,11 +1554,11 @@ int cx23885_417_register(struct cx23885_dev *dev)
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
- printk(KERN_INFO "%s: can't register mpeg device\n", dev->name);
+ pr_info("%s: can't register mpeg device\n", dev->name);
return err;
}
- printk(KERN_INFO "%s: registered device %s [mpeg]\n",
+ pr_info("%s: registered device %s [mpeg]\n",
dev->name, video_device_node_name(dev->v4l_device));
/* ST: Configure the encoder paramaters, but don't begin
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 6115d4e148ba..c148f9a4a9ac 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -17,6 +17,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-reg.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -35,20 +38,14 @@
#include <sound/tlv.h>
-
-#include "cx23885.h"
-#include "cx23885-reg.h"
-
#define AUDIO_SRAM_CHANNEL SRAM_CH07
#define dprintk(level, fmt, arg...) do { \
if (audio_debug + 1 > level) \
- printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt), \
+ chip->dev->name, ##arg); \
} while(0)
-#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
-
/****************************************************************************
Module global static vars
****************************************************************************/
@@ -186,8 +183,8 @@ static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
- "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
+ dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+ buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
chip->num_periods, buf->bpl * chip->num_periods);
/* Enables corresponding bits at AUD_INT_STAT */
@@ -247,7 +244,7 @@ int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
- printk(KERN_WARNING "%s/1: Audio risc op code error\n",
+ pr_warn("%s/1: Audio risc op code error\n",
dev->name);
cx_clear(AUD_INT_DMA_CTL, 0x11);
cx23885_sram_channel_dump(dev,
@@ -327,8 +324,7 @@ static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream)
int err;
if (!chip) {
- printk(KERN_ERR "BUG: cx23885 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: cx23885 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
@@ -555,8 +551,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
return NULL;
if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) {
- printk(KERN_WARNING "%s(): Missing SRAM channel configuration "
- "for analog TV Audio\n", __func__);
+ pr_warn("%s(): Missing SRAM channel configuration for analog TV Audio\n",
+ __func__);
return NULL;
}
@@ -590,8 +586,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
error:
snd_card_free(card);
- printk(KERN_ERR "%s(): Failed to register analog "
- "audio adapter\n", __func__);
+ pr_err("%s(): Failed to register analog audio adapter\n",
+ __func__);
return NULL;
}
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 99ba8d6328f0..0350f13c5a9f 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -23,7 +25,6 @@
#include <linux/firmware.h>
#include <misc/altera.h>
-#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-eeprom.h"
#include "netup-init.h"
@@ -1096,26 +1097,24 @@ void cx23885_card_list(struct cx23885_dev *dev)
if (0 == dev->pci->subsystem_vendor &&
0 == dev->pci->subsystem_device) {
- printk(KERN_INFO
- "%s: Board has no valid PCIe Subsystem ID and can't\n"
- "%s: be autodetected. Pass card=<n> insmod option\n"
- "%s: to workaround that. Redirect complaints to the\n"
- "%s: vendor of the TV card. Best regards,\n"
- "%s: -- tux\n",
- dev->name, dev->name, dev->name, dev->name, dev->name);
+ pr_info("%s: Board has no valid PCIe Subsystem ID and can't\n"
+ "%s: be autodetected. Pass card=<n> insmod option\n"
+ "%s: to workaround that. Redirect complaints to the\n"
+ "%s: vendor of the TV card. Best regards,\n"
+ "%s: -- tux\n",
+ dev->name, dev->name, dev->name, dev->name, dev->name);
} else {
- printk(KERN_INFO
- "%s: Your board isn't known (yet) to the driver.\n"
- "%s: Try to pick one of the existing card configs via\n"
- "%s: card=<n> insmod option. Updating to the latest\n"
- "%s: version might help as well.\n",
- dev->name, dev->name, dev->name, dev->name);
+ pr_info("%s: Your board isn't known (yet) to the driver.\n"
+ "%s: Try to pick one of the existing card configs via\n"
+ "%s: card=<n> insmod option. Updating to the latest\n"
+ "%s: version might help as well.\n",
+ dev->name, dev->name, dev->name, dev->name);
}
- printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
+ pr_info("%s: Here is a list of valid choices for the card=<n> insmod option:\n",
dev->name);
for (i = 0; i < cx23885_bcount; i++)
- printk(KERN_INFO "%s: card=%d -> %s\n",
- dev->name, i, cx23885_boards[i].name);
+ pr_info("%s: card=%d -> %s\n",
+ dev->name, i, cx23885_boards[i].name);
}
static void viewcast_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
@@ -1304,14 +1303,13 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
*/
break;
default:
- printk(KERN_WARNING "%s: warning: "
- "unknown hauppauge model #%d\n",
+ pr_warn("%s: warning: unknown hauppauge model #%d\n",
dev->name, tv.model);
break;
}
- printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
- dev->name, tv.model);
+ pr_info("%s: hauppauge eeprom: model=%d\n",
+ dev->name, tv.model);
}
/* Some TBS cards require initing a chip using a bitbanged SPI attached
@@ -1353,8 +1351,8 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
return 0;
if (command != 0) {
- printk(KERN_ERR "%s(): Unknown command 0x%x.\n",
- __func__, command);
+ pr_err("%s(): Unknown command 0x%x.\n",
+ __func__, command);
return -EINVAL;
}
@@ -2337,14 +2335,13 @@ void cx23885_card_setup(struct cx23885_dev *dev)
filename = "dvb-netup-altera-01.fw";
break;
}
- printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
- cinfo.rev, filename);
+ pr_info("NetUP card rev=0x%x fw_filename=%s\n",
+ cinfo.rev, filename);
ret = request_firmware(&fw, filename, &dev->pci->dev);
if (ret != 0)
- printk(KERN_ERR "did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details "
- "on firmware-problems.", filename);
+ pr_err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+ filename);
else
altera_init(&netup_config, fw);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c86b1093ab99..02b5ec549369 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -27,7 +29,6 @@
#include <asm/div64.h>
#include <linux/firmware.h>
-#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
@@ -50,7 +51,8 @@ MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
- printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
} while (0)
static unsigned int cx23885_devcount;
@@ -407,19 +409,18 @@ static int cx23885_risc_decode(u32 risc)
};
int i;
- printk("0x%08x [ %s", risc,
+ printk(KERN_DEBUG "0x%08x [ %s", risc,
instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
if (risc & (1 << (i + 12)))
- printk(" %s", bits[i]);
- printk(" count=%d ]\n", risc & 0xfff);
+ pr_cont(" %s", bits[i]);
+ pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
static void cx23885_wakeup(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q, u32 count)
{
- struct cx23885_dev *dev = port->dev;
struct cx23885_buffer *buf;
if (list_empty(&q->active))
@@ -530,44 +531,44 @@ void cx23885_sram_channel_dump(struct cx23885_dev *dev,
u32 risc;
unsigned int i, j, n;
- printk(KERN_WARNING "%s: %s - dma channel status dump\n",
- dev->name, ch->name);
+ pr_warn("%s: %s - dma channel status dump\n",
+ dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
- printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
- dev->name, name[i],
- cx_read(ch->cmds_start + 4*i));
+ pr_warn("%s: cmds: %-15s: 0x%08x\n",
+ dev->name, name[i],
+ cx_read(ch->cmds_start + 4*i));
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
- printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
+ pr_warn("%s: risc%d: ", dev->name, i);
cx23885_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
- printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
- ch->ctrl_start + 4 * i, i);
+ pr_warn("%s: (0x%08x) iq %x: ", dev->name,
+ ch->ctrl_start + 4 * i, i);
n = cx23885_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
- printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
- dev->name, i+j, risc, j);
+ pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
+ dev->name, i+j, risc, j);
}
}
- printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
- dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
- printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
- dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
- printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
- dev->name, cx_read(ch->ptr1_reg));
- printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
- dev->name, cx_read(ch->ptr2_reg));
- printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
- dev->name, cx_read(ch->cnt1_reg));
- printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
- dev->name, cx_read(ch->cnt2_reg));
+ pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
+ dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
+ pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
+ dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
+ pr_warn("%s: ptr1_reg: 0x%08x\n",
+ dev->name, cx_read(ch->ptr1_reg));
+ pr_warn("%s: ptr2_reg: 0x%08x\n",
+ dev->name, cx_read(ch->ptr2_reg));
+ pr_warn("%s: cnt1_reg: 0x%08x\n",
+ dev->name, cx_read(ch->cnt1_reg));
+ pr_warn("%s: cnt2_reg: 0x%08x\n",
+ dev->name, cx_read(ch->cnt2_reg));
}
static void cx23885_risc_disasm(struct cx23885_tsport *port,
@@ -576,14 +577,14 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
struct cx23885_dev *dev = port->dev;
unsigned int i, j, n;
- printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
+ pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
dev->name, risc->cpu, (unsigned long)risc->dma);
for (i = 0; i < (risc->size >> 2); i += n) {
- printk(KERN_INFO "%s: %04d: ", dev->name, i);
+ pr_info("%s: %04d: ", dev->name, i);
n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
for (j = 1; j < n; j++)
- printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
- dev->name, i + j, risc->cpu[i + j], j);
+ pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
+ dev->name, i + j, risc->cpu[i + j], j);
if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
break;
}
@@ -674,8 +675,8 @@ static int get_resources(struct cx23885_dev *dev)
dev->name))
return 0;
- printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
- dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
+ pr_err("%s: can't get MMIO memory @ 0x%llx\n",
+ dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
@@ -793,15 +794,15 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
dev->hwrevision = 0xb1;
break;
default:
- printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
- __func__, dev->hwrevision);
+ pr_err("%s() New hardware revision found 0x%x\n",
+ __func__, dev->hwrevision);
}
if (dev->hwrevision)
- printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
+ pr_info("%s() Hardware revision = 0x%02x\n",
__func__, dev->hwrevision);
else
- printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
- __func__, dev->hwrevision);
+ pr_err("%s() Hardware revision unknown 0x%x\n",
+ __func__, dev->hwrevision);
}
/* Find the first v4l2_subdev member of the group id in hw */
@@ -915,8 +916,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
cx23885_init_tsport(dev, &dev->ts2, 2);
if (get_resources(dev) < 0) {
- printk(KERN_ERR "CORE %s No more PCIe resources for "
- "subsystem: %04x:%04x\n",
+ pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
@@ -930,11 +930,11 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->bmmio = (u8 __iomem *)dev->lmmio;
- printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
- dev->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, cx23885_boards[dev->board].name,
- dev->board, card[dev->nr] == dev->board ?
- "insmod option" : "autodetected");
+ pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
+ dev->name, dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, cx23885_boards[dev->board].name,
+ dev->board, card[dev->nr] == dev->board ?
+ "insmod option" : "autodetected");
cx23885_pci_quirks(dev);
@@ -980,8 +980,8 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
if (cx23885_video_register(dev) < 0) {
- printk(KERN_ERR "%s() Failed to register analog "
- "video adapters on VID_A\n", __func__);
+ pr_err("%s() Failed to register analog video adapters on VID_A\n",
+ __func__);
}
}
@@ -990,14 +990,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->ts1.num_frontends =
cx23885_boards[dev->board].num_fds_portb;
if (cx23885_dvb_register(&dev->ts1) < 0) {
- printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
+ pr_err("%s() Failed to register dvb adapters on VID_B\n",
__func__);
}
} else
if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
if (cx23885_417_register(dev) < 0) {
- printk(KERN_ERR
- "%s() Failed to register 417 on VID_B\n",
+ pr_err("%s() Failed to register 417 on VID_B\n",
__func__);
}
}
@@ -1007,15 +1006,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->ts2.num_frontends =
cx23885_boards[dev->board].num_fds_portc;
if (cx23885_dvb_register(&dev->ts2) < 0) {
- printk(KERN_ERR
- "%s() Failed to register dvb on VID_C\n",
+ pr_err("%s() Failed to register dvb on VID_C\n",
__func__);
}
} else
if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
if (cx23885_417_register(dev) < 0) {
- printk(KERN_ERR
- "%s() Failed to register 417 on VID_C\n",
+ pr_err("%s() Failed to register 417 on VID_C\n",
__func__);
}
}
@@ -1344,7 +1341,7 @@ int cx23885_start_dma(struct cx23885_tsport *port,
if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
- printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
+ pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
__func__,
cx23885_boards[dev->board].portb,
cx23885_boards[dev->board].portc);
@@ -1531,7 +1528,6 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
- struct cx23885_dev *dev = port->dev;
struct cx23885_dmaqueue *q = &port->mpegq;
struct cx23885_buffer *buf;
unsigned long flags;
@@ -1551,8 +1547,6 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
- struct cx23885_dev *dev = port->dev;
-
dprintk(1, "%s()\n", __func__);
cx23885_stop_dma(port);
do_cancel_buffers(port, "cancel");
@@ -1579,8 +1573,8 @@ int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
(status & VID_B_MSK_VBI_SYNC) ||
(status & VID_B_MSK_OF) ||
(status & VID_B_MSK_VBI_OF)) {
- printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
- "= 0x%x\n", dev->name, status);
+ pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
+ dev->name, status);
if (status & VID_B_MSK_BAD_PKT)
dprintk(1, " VID_B_MSK_BAD_PKT\n");
if (status & VID_B_MSK_OPC_ERR)
@@ -1641,7 +1635,7 @@ static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
VID_BC_MSK_OF);
- printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
+ pr_err("%s: mpeg risc op code error\n", dev->name);
cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
cx23885_sram_channel_dump(dev,
@@ -1881,15 +1875,14 @@ void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Setting GPIO on encoder ports\n",
+ pr_err("%s: Setting GPIO on encoder ports\n",
dev->name);
cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
}
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
@@ -1899,15 +1892,14 @@ void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Clearing GPIO moving on encoder ports\n",
+ pr_err("%s: Clearing GPIO moving on encoder ports\n",
dev->name);
cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
}
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
@@ -1917,15 +1909,14 @@ u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Reading GPIO moving on encoder ports\n",
+ pr_err("%s: Reading GPIO moving on encoder ports\n",
dev->name);
return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
}
/* TODO: 23-19 */
if (mask & 0x00f80000)
- printk(KERN_INFO "%s: Unsupported\n", dev->name);
+ pr_info("%s: Unsupported\n", dev->name);
return 0;
}
@@ -1939,8 +1930,7 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
if (mask & 0x0007fff8) {
if (encoder_on_portb(dev) || encoder_on_portc(dev))
- printk(KERN_ERR
- "%s: Enabling GPIO on encoder ports\n",
+ pr_err("%s: Enabling GPIO on encoder ports\n",
dev->name);
}
@@ -1995,8 +1985,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
+ pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,
(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -2004,14 +1994,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
err = pci_set_dma_mask(pci_dev, 0xffffffff);
if (err) {
- printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
+ pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
goto fail_ctrl;
}
err = request_irq(pci_dev->irq, cx23885_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s: can't get IRQ %d\n",
+ pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
goto fail_irq;
}
@@ -2097,7 +2087,7 @@ static struct pci_driver cx23885_pci_driver = {
static int __init cx23885_init(void)
{
- printk(KERN_INFO "cx23885 driver version %s loaded\n",
+ pr_info("cx23885 driver version %s loaded\n",
CX23885_VERSION);
return pci_register_driver(&cx23885_pci_driver);
}
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 818f3c2fc98d..589a168d1df4 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -23,7 +25,6 @@
#include <linux/file.h>
#include <linux/suspend.h>
-#include "cx23885.h"
#include <media/v4l2-common.h>
#include "dvb_ca_en50221.h"
@@ -80,7 +81,8 @@ static unsigned int debug;
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s dvb: " fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------ */
@@ -1101,7 +1103,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
memcpy(port->frontends.adapter.proposed_mac,
cinfo.port[port->nr - 1].mac, 6);
- printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
+ pr_info("NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
port->nr, port->frontends.adapter.proposed_mac);
netup_ci_init(port);
@@ -1127,7 +1129,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
/* Read entire EEPROM */
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
- printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
+ pr_info("TeVii S470 MAC= %pM\n", eeprom + 0xa0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
return 0;
}
@@ -1144,7 +1146,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
sizeof(eeprom));
- printk(KERN_INFO "%s port %d MAC address: %pM\n",
+ pr_info("%s port %d MAC address: %pM\n",
cx23885_boards[dev->board].name, port->nr,
eeprom + 0xc0 + (port->nr-1) * 8);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
@@ -1185,7 +1187,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
sizeof(eeprom));
- printk(KERN_INFO "%s MAC address: %pM\n",
+ pr_info("%s MAC address: %pM\n",
cx23885_boards[dev->board].name, eeprom + 0xc0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
return 0;
@@ -1464,7 +1466,7 @@ static int dvb_register(struct cx23885_tsport *port)
return -ENODEV;
if (dib7000p_ops.i2c_enumeration(&i2c_bus->i2c_adap, 1, 0x12, &dib7070p_dib7000p_config) < 0) {
- printk(KERN_WARNING "Unable to enumerate dib7000p\n");
+ pr_warn("Unable to enumerate dib7000p\n");
return -ENODEV;
}
fe0->dvb.frontend = dib7000p_ops.init(&i2c_bus->i2c_adap, 0x80, &dib7070p_dib7000p_config);
@@ -1524,7 +1526,7 @@ static int dvb_register(struct cx23885_tsport *port)
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
&dev->i2c_bus[1].i2c_adap, &cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ pr_err("%s/2: xc4000 attach failed\n",
dev->name);
goto frontend_detach;
}
@@ -1597,8 +1599,7 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x09))
- printk(KERN_ERR
- "No LNBH24 found!\n");
+ pr_err("No LNBH24 found!\n");
}
}
@@ -1618,8 +1619,7 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus->i2c_adap,
LNBH24_PCL | LNBH24_TTX,
LNBH24_TEN, 0x0a))
- printk(KERN_ERR
- "No LNBH24 found!\n");
+ pr_err("No LNBH24 found!\n");
}
}
@@ -2482,14 +2482,13 @@ static int dvb_register(struct cx23885_tsport *port)
break;
default:
- printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
- " isn't supported yet\n",
- dev->name);
+ pr_info("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
+ dev->name);
break;
}
if ((NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend)) {
- printk(KERN_ERR "%s: frontend initialization failed\n",
+ pr_err("%s: frontend initialization failed\n",
dev->name);
goto frontend_detach;
}
@@ -2570,7 +2569,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
* are for safety, and should provide a good foundation for the
* future addition of any multi-frontend cx23885 based boards.
*/
- printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
+ pr_info("%s() allocating %d frontend(s)\n", __func__,
port->num_frontends);
for (i = 1; i <= port->num_frontends; i++) {
@@ -2578,7 +2577,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
if (vb2_dvb_alloc_frontend(
&port->frontends, i) == NULL) {
- printk(KERN_ERR "%s() failed to alloc\n", __func__);
+ pr_err("%s() failed to alloc\n", __func__);
return -ENOMEM;
}
@@ -2597,7 +2596,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
/* dvb stuff */
/* We have to init the queue for each frontend on a port. */
- printk(KERN_INFO "%s: cx23885 based dvb card\n", dev->name);
+ pr_info("%s: cx23885 based dvb card\n", dev->name);
q = &fe0->dvb.dvbq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
@@ -2617,8 +2616,8 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
}
err = dvb_register(port);
if (err != 0)
- printk(KERN_ERR "%s() dvb_register failed err = %d\n",
- __func__, err);
+ pr_err("%s() dvb_register failed err = %d\n",
+ __func__, err);
return err;
}
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index a6c45eb0a105..460cb8f314b2 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -122,7 +122,7 @@ static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf)
}
if (i > 7) {
- printk(KERN_ERR "%s: timeout, the slave no response\n",
+ pr_err("%s: timeout, the slave no response\n",
__func__);
ret = 1; /* timeout, the slave no response */
} else { /* the slave not busy, prepare for getting data */
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 61591225be9a..8528032090f2 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -15,14 +15,14 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
-#include "cx23885.h"
-
#include <media/v4l2-common.h>
static unsigned int i2c_debug;
@@ -35,7 +35,8 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
#define dprintk(level, fmt, arg...)\
do { if (i2c_debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt), \
+ __func__, ##arg); \
} while (0)
#define I2C_WAIT_DELAY 32
@@ -119,9 +120,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
if (i2c_debug) {
- printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
+ printk(KERN_DEBUG " <W %02x %02x", msg->addr << 1, msg->buf[0]);
if (!(ctrl & I2C_NOSTOP))
- printk(" >\n");
+ pr_cont(" >\n");
}
for (cnt = 1; cnt < msg->len; cnt++) {
@@ -141,9 +142,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
if (i2c_debug) {
- dprintk(1, " %02x", msg->buf[cnt]);
+ pr_cont(" %02x", msg->buf[cnt]);
if (!(ctrl & I2C_NOSTOP))
- dprintk(1, " >\n");
+ pr_cont(" >\n");
}
}
return msg->len;
@@ -151,7 +152,7 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
if (i2c_debug)
- printk(KERN_ERR " ERR: %d\n", retval);
+ pr_err(" ERR: %d\n", retval);
return retval;
}
@@ -212,15 +213,13 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
if (i2c_debug)
- printk(KERN_ERR " ERR: %d\n", retval);
+ pr_err(" ERR: %d\n", retval);
return retval;
}
static int i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
- struct cx23885_i2c *bus = i2c_adap->algo_data;
- struct cx23885_dev *dev = bus->dev;
int i, retval = 0;
dprintk(1, "%s(num = %d)\n", __func__, num);
@@ -302,7 +301,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk(KERN_INFO "%s: i2c scan: found device @ 0x%04x [%s]\n",
+ pr_info("%s: i2c scan: found device @ 0x%04x [%s]\n",
name, i, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
@@ -330,12 +329,12 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
if (0 == bus->i2c_rc) {
dprintk(1, "%s: i2c bus %d registered\n", dev->name, bus->nr);
if (i2c_scan) {
- printk(KERN_INFO "%s: scan bus %d:\n",
+ pr_info("%s: scan bus %d:\n",
dev->name, bus->nr);
do_i2c_scan(dev->name, &bus->i2c_client);
}
} else
- printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
+ pr_warn("%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
/* Instantiate the IR receiver device, if present */
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 410c3141c163..1f092febdbd1 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -30,13 +30,13 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-input.h"
+
#include <linux/slab.h>
#include <media/rc-core.h>
#include <media/v4l2-subdev.h>
-#include "cx23885.h"
-#include "cx23885-input.h"
-
#define MODULE_NAME "cx23885"
static void cx23885_input_process_measurements(struct cx23885_dev *dev,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 89dc4cc3e1ce..2cd5ac41ab75 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -16,12 +16,12 @@
* GNU General Public License for more details.
*/
-#include <media/v4l2-device.h>
-
#include "cx23885.h"
#include "cx23885-ir.h"
#include "cx23885-input.h"
+#include <media/v4l2-device.h>
+
#define CX23885_IR_RX_FIFO_SERVICE_REQ 0
#define CX23885_IR_RX_END_OF_RX_DETECTED 1
#define CX23885_IR_RX_HW_FIFO_OVERRUN 2
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 75e7fa7b1121..369e545cac04 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -15,13 +15,13 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include "cx23885.h"
-
static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
@@ -32,7 +32,8 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
#define dprintk(level, fmt, arg...)\
do { if (vbi_debug >= level)\
- printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 33d168ef278d..ecc580af0148 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -15,6 +15,9 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23885-video.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -27,8 +30,6 @@
#include <linux/kthread.h>
#include <asm/div64.h>
-#include "cx23885.h"
-#include "cx23885-video.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -66,7 +67,8 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
#define dprintk(level, fmt, arg...)\
do { if (video_debug >= level)\
- printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+ printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \
+ __func__, ##arg); \
} while (0)
/* ------------------------------------------------------------------- */
@@ -194,7 +196,7 @@ u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
ret = i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg[0], 2);
if (ret != 2)
- printk(KERN_ERR "%s() error\n", __func__);
+ pr_err("%s() error\n", __func__);
return b1[0];
}
@@ -811,7 +813,6 @@ static int vidioc_log_status(struct file *file, void *priv)
static int cx23885_query_audinput(struct file *file, void *priv,
struct v4l2_audio *i)
{
- struct cx23885_dev *dev = video_drvdata(file);
static const char *iname[] = {
[0] = "Baseband L/R 1",
[1] = "Baseband L/R 2",
@@ -1000,7 +1001,7 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
fe->ops.tuner_ops.set_analog_params(fe, &params);
}
else
- printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+ pr_err("%s() No analog tuner, aborting\n", __func__);
/* When changing channels it is required to reset TVAUDIO */
msleep(100);
@@ -1058,15 +1059,14 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
if (status & VID_BC_MSK_OPC_ERR) {
dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
VID_BC_MSK_OPC_ERR);
- printk(KERN_WARNING "%s: video risc op code error\n",
+ pr_warn("%s: video risc op code error\n",
dev->name);
cx23885_sram_channel_dump(dev,
&dev->sram_channels[SRAM_CH01]);
}
if (status & VID_BC_MSK_SYNC)
- dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) "
- "video lines miss-match\n",
+ dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) video lines miss-match\n",
VID_BC_MSK_SYNC);
if (status & VID_BC_MSK_OF)
@@ -1297,11 +1297,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
- printk(KERN_INFO "%s: can't register video device\n",
+ pr_info("%s: can't register video device\n",
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s: registered device %s [v4l2]\n",
+ pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
/* register VBI device */
@@ -1311,11 +1311,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0) {
- printk(KERN_INFO "%s: can't register vbi device\n",
+ pr_info("%s: can't register vbi device\n",
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s: registered device %s\n",
+ pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
/* Register ALSA audio device */
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index a6735afe2269..cb714ab60d69 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c1aa888af705..040323b0f945 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -16,15 +16,15 @@
* GNU General Public License for more details.
*/
+#include "cx23885.h"
+#include "cx23888-ir.h"
+
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/rc-core.h>
-#include "cx23885.h"
-#include "cx23888-ir.h"
-
static unsigned int ir_888_debug;
module_param(ir_888_debug, int, 0644);
MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]");
@@ -1015,8 +1015,8 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
j = 0;
break;
}
- v4l2_info(sd, "\tNext carrier edge window: 16 clocks "
- "-%1d/+%1d, %u to %u Hz\n", i, j,
+ v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n",
+ i, j,
clock_divider_to_freq(rxclk, 16 + j),
clock_divider_to_freq(rxclk, 16 - i));
}
@@ -1026,8 +1026,7 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "\tLow pass filter: %s\n",
filtr ? "enabled" : "disabled");
if (filtr)
- v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, "
- "%u ns\n",
+ v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n",
lpf_count_to_us(filtr),
lpf_count_to_ns(filtr));
v4l2_info(sd, "\tPulse width timer timed-out: %s\n",
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index b6542ee4385b..6384c12aa38e 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -52,7 +52,7 @@ int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr)
ret = i2c_transfer(i2c_adap, msg, 2);
if (ret != 2) {
- printk(KERN_ERR "eeprom i2c read error, status=%d\n", ret);
+ pr_err("eeprom i2c read error, status=%d\n", ret);
return -1;
}
@@ -80,7 +80,7 @@ int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data)
ret = i2c_transfer(i2c_adap, msg, 1);
if (ret != 1) {
- printk(KERN_ERR "eeprom i2c write error, status=%d\n", ret);
+ pr_err("eeprom i2c write error, status=%d\n", ret);
return -1;
}
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index 76d9487aafc8..6a27ef5d9ec2 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -40,7 +40,7 @@ static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
}
static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
@@ -64,7 +64,7 @@ static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
}
static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
@@ -84,7 +84,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c write error!\n", __func__);
+ pr_err("%s: i2c write error!\n", __func__);
msg.flags = I2C_M_RD;
msg.len = 1;
@@ -92,7 +92,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
ret = i2c_transfer(i2c, &msg, 1);
if (ret != 1)
- printk(KERN_ERR "%s: i2c read error!\n", __func__);
+ pr_err("%s: i2c read error!\n", __func__);
return buf[0];
}
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 723f06462104..c81fe4681d14 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -1,5 +1,4 @@
/*
- *
* Support for audio capture
* PCI function #1 of the cx2388x.
*
@@ -18,14 +17,14 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "cx88-reg.h"
+
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
@@ -33,7 +32,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <asm/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,22 +40,15 @@
#include <sound/tlv.h>
#include <media/i2c/wm8775.h>
-#include "cx88.h"
-#include "cx88-reg.h"
-
#define dprintk(level, fmt, arg...) do { \
if (debug + 1 > level) \
- printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
-
-#define dprintk_core(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
+ printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt), \
+ chip->core->name, ##arg); \
+} while (0)
-/****************************************************************************
- Data type declarations - Can be moded to a header file later
- ****************************************************************************/
+/*
+ * Data type declarations - Can be moved to a header file later
+ */
struct cx88_audio_buffer {
unsigned int bpl;
@@ -91,13 +82,10 @@ struct cx88_audio_dev {
struct snd_pcm_substream *substream;
};
-typedef struct cx88_audio_dev snd_cx88_card_t;
-
-
-/****************************************************************************
- Module global static vars
- ****************************************************************************/
+/*
+ * Module global static vars
+ */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
@@ -109,10 +97,9 @@ MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
-
-/****************************************************************************
- Module macros
- ****************************************************************************/
+/*
+ * Module macros
+ */
MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
@@ -120,25 +107,23 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
-MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
- "{{Conexant,23882},"
- "{{Conexant,23883}");
+MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}");
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
-/****************************************************************************
- Module specific funtions
- ****************************************************************************/
+/*
+ * Module specific functions
+ */
/*
* BOARD Specific: Sets audio DMA
*/
-static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_start_audio_dma(struct cx88_audio_dev *chip)
{
struct cx88_audio_buffer *buf = chip->buf;
- struct cx88_core *core=chip->core;
+ struct cx88_core *core = chip->core;
const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
@@ -154,8 +139,9 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
- "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1,
+ dprintk(1,
+ "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+ buf->bpl, cx_read(audio_ch->cmds_start + 8) >> 1,
chip->num_periods, buf->bpl * chip->num_periods);
/* Enables corresponding bits at AUD_INT_STAT */
@@ -169,8 +155,11 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | PCI_INT_AUDINT);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
- cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */
+
+ /* Enables Risc Processor */
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
+ /* audio downstream FIFO and RISC enable */
+ cx_set(MO_AUD_DMACNTRL, 0x11);
if (debug)
cx88_sram_channel_dump(chip->core, audio_ch);
@@ -181,9 +170,10 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
/*
* BOARD Specific: Resets audio DMA
*/
-static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_stop_audio_dma(struct cx88_audio_dev *chip)
{
- struct cx88_core *core=chip->core;
+ struct cx88_core *core = chip->core;
+
dprintk(1, "Stopping audio DMA\n");
/* stop dma */
@@ -195,7 +185,8 @@ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
if (debug)
- cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
+ cx88_sram_channel_dump(chip->core,
+ &cx88_sram_channels[SRAM_CH25]);
return 0;
}
@@ -221,7 +212,7 @@ static const char *cx88_aud_irqs[32] = {
/*
* BOARD Specific: Threats IRQ audio specific calls
*/
-static void cx8801_aud_irq(snd_cx88_card_t *chip)
+static void cx8801_aud_irq(struct cx88_audio_dev *chip)
{
struct cx88_core *core = chip->core;
u32 status, mask;
@@ -232,12 +223,12 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
return;
cx_write(MO_AUD_INTSTAT, status);
if (debug > 1 || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq aud",
+ cx88_print_irqbits("irq aud",
cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs),
status, mask);
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
- printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name);
+ pr_warn("Audio risc op code error\n");
cx_clear(MO_AUD_DMACNTRL, 0x11);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]);
}
@@ -259,7 +250,7 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
*/
static irqreturn_t cx8801_irq(int irq, void *dev_id)
{
- snd_cx88_card_t *chip = dev_id;
+ struct cx88_audio_dev *chip = dev_id;
struct cx88_core *core = chip->core;
u32 status;
int loop, handled = 0;
@@ -267,7 +258,7 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_AUDINT);
- if (0 == status)
+ if (status == 0)
goto out;
dprintk(3, "cx8801_irq loop %d/%d, status %x\n",
loop, MAX_IRQ_LOOP, status);
@@ -280,10 +271,8 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
cx8801_aud_irq(chip);
}
- if (MAX_IRQ_LOOP == loop) {
- printk(KERN_ERR
- "%s/1: IRQ loop detected, disabling interrupts\n",
- core->name);
+ if (loop == MAX_IRQ_LOOP) {
+ pr_err("IRQ loop detected, disabling interrupts\n");
cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT);
}
@@ -298,26 +287,25 @@ static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
int i;
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
- if (NULL == buf->vaddr) {
+ if (!buf->vaddr) {
dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
- (unsigned long)buf->vaddr,
- nr_pages << PAGE_SHIFT);
+ (unsigned long)buf->vaddr, nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
buf->nr_pages = nr_pages;
buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
- if (NULL == buf->sglist)
+ if (!buf->sglist)
goto vzalloc_err;
sg_init_table(buf->sglist, buf->nr_pages);
for (i = 0; i < buf->nr_pages; i++) {
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
- if (NULL == pg)
+ if (!pg)
goto vmalloc_to_page_err;
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
}
@@ -339,7 +327,7 @@ static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
buf->nr_pages, PCI_DMA_FROMDEVICE);
- if (0 == buf->sglen) {
+ if (buf->sglen == 0) {
pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
return -ENOMEM;
}
@@ -353,7 +341,8 @@ static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
if (!buf->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen,
+ PCI_DMA_FROMDEVICE);
buf->sglen = 0;
return 0;
}
@@ -367,18 +356,18 @@ static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
return 0;
}
-
-static int dsp_buffer_free(snd_cx88_card_t *chip)
+static int dsp_buffer_free(struct cx88_audio_dev *chip)
{
struct cx88_riscmem *risc = &chip->buf->risc;
- BUG_ON(!chip->dma_size);
+ WARN_ON(!chip->dma_size);
- dprintk(2,"Freeing buffer\n");
+ dprintk(2, "Freeing buffer\n");
cx88_alsa_dma_unmap(chip);
cx88_alsa_dma_free(chip->buf);
if (risc->cpu)
- pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
+ pci_free_consistent(chip->pci, risc->size,
+ risc->cpu, risc->dma);
kfree(chip->buf);
chip->buf = NULL;
@@ -386,9 +375,9 @@ static int dsp_buffer_free(snd_cx88_card_t *chip)
return 0;
}
-/****************************************************************************
- ALSA PCM Interface
- ****************************************************************************/
+/*
+ * ALSA PCM Interface
+ */
/*
* Digital hardware definition
@@ -406,13 +395,15 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
- /* Analog audio output will be full of clicks and pops if there
- are not exactly four lines in the SRAM FIFO buffer. */
- .period_bytes_min = DEFAULT_FIFO_SIZE/4,
- .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ /*
+ * Analog audio output will be full of clicks and pops if there
+ * are not exactly four lines in the SRAM FIFO buffer.
+ */
+ .period_bytes_min = DEFAULT_FIFO_SIZE / 4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE / 4,
.periods_min = 1,
.periods_max = 1024,
- .buffer_bytes_max = (1024*1024),
+ .buffer_bytes_max = (1024 * 1024),
};
/*
@@ -420,17 +411,17 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
*/
static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
if (!chip) {
- printk(KERN_ERR "BUG: cx88 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: cx88 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
- err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS);
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
@@ -440,6 +431,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) {
unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
+
bpl &= ~7; /* must be multiple of 8 */
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
@@ -447,7 +439,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
return 0;
_error:
- dprintk(1,"Error opening PCM!\n");
+ dprintk(1, "Error opening PCM!\n");
return err;
}
@@ -462,10 +454,10 @@ static int snd_cx88_close(struct snd_pcm_substream *substream)
/*
* hw_params callback
*/
-static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
- struct snd_pcm_hw_params * hw_params)
+static int snd_cx88_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct cx88_audio_buffer *buf;
int ret;
@@ -479,18 +471,18 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
chip->num_periods = params_periods(hw_params);
chip->dma_size = chip->period_size * params_periods(hw_params);
- BUG_ON(!chip->dma_size);
- BUG_ON(chip->num_periods & (chip->num_periods-1));
+ WARN_ON(!chip->dma_size);
+ WARN_ON(chip->num_periods & (chip->num_periods - 1));
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (NULL == buf)
+ if (!buf)
return -ENOMEM;
chip->buf = buf;
buf->bpl = chip->period_size;
ret = cx88_alsa_dma_init(chip,
- (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
+ (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
@@ -504,7 +496,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
goto error;
/* Loop back to start of program */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
substream->runtime->dma_area = chip->buf->vaddr;
@@ -520,10 +512,9 @@ error:
/*
* hw free callback
*/
-static int snd_cx88_hw_free(struct snd_pcm_substream * substream)
+static int snd_cx88_hw_free(struct snd_pcm_substream *substream)
{
-
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
@@ -546,7 +537,7 @@ static int snd_cx88_prepare(struct snd_pcm_substream *substream)
*/
static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
int err;
/* Local interrupts are already disabled by ALSA */
@@ -554,13 +545,13 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err=_cx88_start_audio_dma(chip);
+ err = _cx88_start_audio_dma(chip);
break;
case SNDRV_PCM_TRIGGER_STOP:
- err=_cx88_stop_audio_dma(chip);
+ err = _cx88_stop_audio_dma(chip);
break;
default:
- err=-EINVAL;
+ err = -EINVAL;
break;
}
@@ -574,7 +565,7 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
*/
static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
{
- snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
@@ -583,16 +574,17 @@ static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
// dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__,
// count, new, count & (runtime->periods-1),
// runtime->period_size * (count & (runtime->periods-1)));
- return runtime->period_size * (count & (runtime->periods-1));
+ return runtime->period_size * (count & (runtime->periods - 1));
}
/*
* page callback (needed for mmap)
*/
static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
- unsigned long offset)
+ unsigned long offset)
{
void *pageptr = substream->runtime->dma_area + offset;
+
return vmalloc_to_page(pageptr);
}
@@ -614,7 +606,8 @@ static const struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
+static int snd_cx88_pcm(struct cx88_audio_dev *chip, int device,
+ const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -629,9 +622,9 @@ static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
return 0;
}
-/****************************************************************************
- CONTROL INTERFACE
- ****************************************************************************/
+/*
+ * CONTROL INTERFACE
+ */
static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
@@ -646,8 +639,8 @@ static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
- struct cx88_core *core=chip->core;
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f),
bal = cx_read(AUD_BAL_CTL);
@@ -659,9 +652,9 @@ static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
}
static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
int left = value->value.integer.value[0];
int right = value->value.integer.value[1];
@@ -683,8 +676,8 @@ static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
- struct cx88_core *core=chip->core;
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
int left, right, v, b;
int changed = 0;
u32 old;
@@ -733,7 +726,7 @@ static const struct snd_kcontrol_new snd_cx88_volume = {
static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
@@ -742,9 +735,9 @@ static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
}
static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
int ret = 0;
@@ -756,8 +749,9 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol ^= bit;
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
- if (core->sd_wm8775 && ((1<<6) == bit))
- wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
+ if (core->sd_wm8775 && ((1 << 6) == bit))
+ wm8775_s_ctrl(core,
+ V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
@@ -770,7 +764,7 @@ static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
- .private_value = (1<<8),
+ .private_value = (1 << 8),
};
static const struct snd_kcontrol_new snd_cx88_source_switch = {
@@ -779,13 +773,13 @@ static const struct snd_kcontrol_new snd_cx88_source_switch = {
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
- .private_value = (1<<6),
+ .private_value = (1 << 6),
};
static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
s32 val;
@@ -795,9 +789,9 @@ static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
}
static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
- snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
wm8775_s_ctrl(core, V4L2_CID_AUDIO_LOUDNESS,
@@ -813,9 +807,9 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
.put = snd_cx88_alc_put,
};
-/****************************************************************************
- Basic Flow for Sound Devices
- ****************************************************************************/
+/*
+ * Basic Flow for Sound Devices
+ */
/*
* PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio
@@ -823,8 +817,8 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
*/
static const struct pci_device_id cx88_audio_pci_tbl[] = {
- {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
- {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0x14f1, 0x8801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x14f1, 0x8811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0, }
};
MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
@@ -833,13 +827,12 @@ MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
* Chip-specific destructor
*/
-static int snd_cx88_free(snd_cx88_card_t *chip)
+static int snd_cx88_free(struct cx88_audio_dev *chip)
{
-
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- cx88_core_put(chip->core,chip->pci);
+ cx88_core_put(chip->core, chip->pci);
pci_disable_device(chip->pci);
return 0;
@@ -848,27 +841,26 @@ static int snd_cx88_free(snd_cx88_card_t *chip)
/*
* Component Destructor
*/
-static void snd_cx88_dev_free(struct snd_card * card)
+static void snd_cx88_dev_free(struct snd_card *card)
{
- snd_cx88_card_t *chip = card->private_data;
+ struct cx88_audio_dev *chip = card->private_data;
snd_cx88_free(chip);
}
-
/*
* Alsa Constructor - Component probe
*/
static int devno;
static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
- snd_cx88_card_t **rchip,
+ struct cx88_audio_dev **rchip,
struct cx88_core **core_ptr)
{
- snd_cx88_card_t *chip;
- struct cx88_core *core;
- int err;
- unsigned char pci_lat;
+ struct cx88_audio_dev *chip;
+ struct cx88_core *core;
+ int err;
+ unsigned char pci_lat;
*rchip = NULL;
@@ -881,19 +873,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
chip = card->private_data;
core = cx88_core_get(pci);
- if (NULL == core) {
+ if (!core) {
err = -EINVAL;
return err;
}
- err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
if (err) {
- dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
+ dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n", core->name);
cx88_core_put(core, pci);
return err;
}
-
/* pci init */
chip->card = card;
chip->pci = pci;
@@ -907,17 +898,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
IRQF_SHARED, chip->core->name, chip);
if (err < 0) {
dprintk(0, "%s: can't get IRQ %d\n",
- chip->core->name, chip->pci->irq);
+ chip->core->name, chip->pci->irq);
return err;
}
/* print pci info */
pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat);
- dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", core->name, devno,
- pci_name(pci), pci->revision, pci->irq,
- pci_lat, (unsigned long long)pci_resource_start(pci,0));
+ dprintk(1,
+ "ALSA %s/%i: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ core->name, devno,
+ pci_name(pci), pci->revision, pci->irq,
+ pci_lat, (unsigned long long)pci_resource_start(pci, 0));
chip->irq = pci->irq;
synchronize_irq(chip->irq);
@@ -931,10 +923,10 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
static int cx88_audio_initdev(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
- struct snd_card *card;
- snd_cx88_card_t *chip;
- struct cx88_core *core = NULL;
- int err;
+ struct snd_card *card;
+ struct cx88_audio_dev *chip;
+ struct cx88_core *core = NULL;
+ int err;
if (devno >= SNDRV_CARDS)
return (-ENODEV);
@@ -945,7 +937,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
}
err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx88_card_t), &card);
+ sizeof(struct cx88_audio_dev), &card);
if (err < 0)
return err;
@@ -973,19 +965,20 @@ static int cx88_audio_initdev(struct pci_dev *pci,
if (core->sd_wm8775)
snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
- strcpy (card->driver, "CX88x");
+ strcpy(card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",
- card->shortname,(unsigned long long)pci_resource_start(pci, 0));
- strcpy (card->mixername, "CX88");
+ card->shortname,
+ (unsigned long long)pci_resource_start(pci, 0));
+ strcpy(card->mixername, "CX88");
- dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
- card->driver,devno);
+ dprintk(0, "%s/%i: ALSA support for cx2388x boards\n",
+ card->driver, devno);
err = snd_card_register(card);
if (err < 0)
goto error;
- pci_set_drvdata(pci,card);
+ pci_set_drvdata(pci, card);
devno++;
return 0;
@@ -994,6 +987,7 @@ error:
snd_card_free(card);
return err;
}
+
/*
* ALSA destructor
*/
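The log conversions in cx88-alsa.c above (pr_err(), pr_warn(), and the reworked dprintk()) all rely on a pr_fmt() definition that lives outside the visible hunks. A rough sketch of the convention being assumed, with the usual KBUILD_MODNAME-based prefix standing in for whatever cx88 actually defines:

/* Sketch only: pr_fmt() must be defined before the printk headers are
 * pulled in; the real cx88 definition is not part of these hunks.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define dprintk(level, fmt, arg...) do {				\
	if (debug + 1 > level)						\
		printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt),		\
		       chip->core->name, ##arg);			\
} while (0)

With that in place, dprintk(1, "Start audio DMA ...") comes out roughly as "cx88_alsa: cx88[0]: alsa: Start audio DMA ..." at KERN_DEBUG, and pr_err()/pr_warn() pick up the same module prefix, which is why the patch can drop the hand-rolled "%s/1: ", core->name prefixes.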
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index b532e49e8f33..aa49c9597d9c 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1,5 +1,4 @@
/*
- *
* Support for a cx23416 mpeg encoder via cx2388x host port.
* "blackbird" reference design.
*
@@ -20,12 +19,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -38,21 +35,20 @@
#include <media/v4l2-event.h>
#include <media/drv-intf/cx2341x.h>
-#include "cx88.h"
-
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [blackbird]");
-#define dprintk(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG pr_fmt("%s: blackbird:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
@@ -70,6 +66,7 @@ enum blackbird_capture_type {
BLACKBIRD_RAW_CAPTURE,
BLACKBIRD_RAW_PASSTHRU_CAPTURE
};
+
enum blackbird_capture_bits {
BLACKBIRD_RAW_BITS_NONE = 0x00,
BLACKBIRD_RAW_BITS_YUV_CAPTURE = 0x01,
@@ -78,33 +75,40 @@ enum blackbird_capture_bits {
BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE = 0x10
};
+
enum blackbird_capture_end {
BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */
BLACKBIRD_END_NOW, /* stop immediately, no irq */
};
+
enum blackbird_framerate {
BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
BLACKBIRD_FRAMERATE_PAL_25 /* PAL: 25fps */
};
+
enum blackbird_stream_port {
BLACKBIRD_OUTPUT_PORT_MEMORY,
BLACKBIRD_OUTPUT_PORT_STREAMING,
BLACKBIRD_OUTPUT_PORT_SERIAL
};
+
enum blackbird_data_xfer_status {
BLACKBIRD_MORE_BUFFERS_FOLLOW,
BLACKBIRD_LAST_BUFFER,
};
+
enum blackbird_picture_mask {
BLACKBIRD_PICTURE_MASK_NONE,
BLACKBIRD_PICTURE_MASK_I_FRAMES,
BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
};
+
enum blackbird_vbi_mode_bits {
BLACKBIRD_VBI_BITS_SLICED,
BLACKBIRD_VBI_BITS_RAW,
};
+
enum blackbird_vbi_insertion_bits {
BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
@@ -112,56 +116,69 @@ enum blackbird_vbi_insertion_bits {
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
+
enum blackbird_dma_unit {
BLACKBIRD_DMA_BYTES,
BLACKBIRD_DMA_FRAMES,
};
+
enum blackbird_dma_transfer_status_bits {
BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
+
enum blackbird_pause {
BLACKBIRD_PAUSE_ENCODING,
BLACKBIRD_RESUME_ENCODING,
};
+
enum blackbird_copyright {
BLACKBIRD_COPYRIGHT_OFF,
BLACKBIRD_COPYRIGHT_ON,
};
+
enum blackbird_notification_type {
BLACKBIRD_NOTIFICATION_REFRESH,
};
+
enum blackbird_notification_status {
BLACKBIRD_NOTIFICATION_OFF,
BLACKBIRD_NOTIFICATION_ON,
};
+
enum blackbird_notification_mailbox {
BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
};
+
enum blackbird_field1_lines {
BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
};
+
enum blackbird_field2_lines {
BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
};
+
enum blackbird_custom_data_type {
BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
BLACKBIRD_CUSTOM_PRIVATE_PACKET,
};
+
enum blackbird_mute {
BLACKBIRD_UNMUTE,
BLACKBIRD_MUTE,
};
+
enum blackbird_mute_video_mask {
BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
+
enum blackbird_mute_video_shift {
BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
@@ -215,14 +232,14 @@ static void host_setup(struct cx88_core *core)
static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1);
- u32 gpio0,need;
+ u32 gpio0, need;
need = state ? 2 : 0;
for (;;) {
gpio0 = cx_read(MO_GP0_IO) & 2;
if (need == gpio0)
return 0;
- if (time_after(jiffies,timeout))
+ if (time_after(jiffies, timeout))
return -1;
udelay(1);
}
@@ -241,7 +258,7 @@ static int memory_write(struct cx88_core *core, u32 address, u32 value)
cx_read(P1_MDATA0);
cx_read(P1_MADDR0);
- return wait_ready_gpio0_bit1(core,1);
+ return wait_ready_gpio0_bit1(core, 1);
}
static int memory_read(struct cx88_core *core, u32 address, u32 *value)
@@ -255,7 +272,7 @@ static int memory_read(struct cx88_core *core, u32 address, u32 *value)
cx_writeb(P1_MADDR0, (unsigned int)address);
cx_read(P1_MADDR0);
- retval = wait_ready_gpio0_bit1(core,1);
+ retval = wait_ready_gpio0_bit1(core, 1);
cx_writeb(P1_MDATA3, 0);
val = (unsigned char)cx_read(P1_MDATA3) << 24;
@@ -282,10 +299,9 @@ static int register_write(struct cx88_core *core, u32 address, u32 value)
cx_read(P1_RDATA0);
cx_read(P1_RADDR0);
- return wait_ready_gpio0_bit1(core,1);
+ return wait_ready_gpio0_bit1(core, 1);
}
-
static int register_read(struct cx88_core *core, u32 address, u32 *value)
{
int retval;
@@ -296,7 +312,7 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
cx_writeb(P1_RRDWR, 0);
cx_read(P1_RADDR0);
- retval = wait_ready_gpio0_bit1(core,1);
+ retval = wait_ready_gpio0_bit1(core, 1);
val = (unsigned char)cx_read(P1_RDATA0);
val |= (unsigned char)cx_read(P1_RDATA1) << 8;
val |= (unsigned char)cx_read(P1_RDATA2) << 16;
@@ -308,20 +324,24 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
/* ------------------------------------------------------------------ */
-static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
+static int blackbird_mbox_func(void *priv, u32 command, int in,
+ int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
struct cx8802_dev *dev = priv;
unsigned long timeout;
u32 value, flag, retval;
int i;
- dprintk(1,"%s: 0x%X\n", __func__, command);
+ dprintk(1, "%s: 0x%X\n", __func__, command);
- /* this may not be 100% safe if we can't read any memory location
- without side effects */
+ /*
+ * this may not be 100% safe if we can't read any memory location
+ * without side effects
+ */
memory_read(dev->core, dev->mailbox - 4, &value);
if (value != 0x12345678) {
- dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
+ dprintk(0,
+ "Firmware and/or mailbox pointer not initialized or corrupted\n");
return -EIO;
}
@@ -336,7 +356,8 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
/* write command + args + fill remaining with zeros */
memory_write(dev->core, dev->mailbox + 1, command); /* command code */
- memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
+ /* timeout */
+ memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT);
for (i = 0; i < in; i++) {
memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
dprintk(1, "API Input %d = %d\n", i, data[i]);
@@ -353,7 +374,7 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
memory_read(dev->core, dev->mailbox, &flag);
if (0 != (flag & 4))
break;
- if (time_after(jiffies,timeout)) {
+ if (time_after(jiffies, timeout)) {
dprintk(0, "ERROR: API Mailbox timeout %x\n", command);
return -EIO;
}
@@ -367,15 +388,19 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
}
memory_read(dev->core, dev->mailbox + 2, &retval);
- dprintk(1, "API result = %d\n",retval);
+ dprintk(1, "API result = %d\n", retval);
flag = 0;
memory_write(dev->core, dev->mailbox, flag);
return retval;
}
+
/* ------------------------------------------------------------------ */
-/* We don't need to call the API often, so using just one mailbox will probably suffice */
+/*
+ * We don't need to call the API often, so using just one mailbox
+ * will probably suffice
+ */
static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
u32 inputcnt, u32 outputcnt, ...)
{
@@ -385,9 +410,9 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
va_start(vargs, outputcnt);
- for (i = 0; i < inputcnt; i++) {
+ for (i = 0; i < inputcnt; i++)
data[i] = va_arg(vargs, int);
- }
+
err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
for (i = 0; i < outputcnt; i++) {
int *vptr = va_arg(vargs, int *);
@@ -399,8 +424,8 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
static int blackbird_find_mailbox(struct cx8802_dev *dev)
{
- u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456};
- int signaturecnt=0;
+ u32 signature[4] = {0x12345678, 0x34567812, 0x56781234, 0x78123456};
+ int signaturecnt = 0;
u32 value;
int i;
@@ -410,9 +435,9 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
signaturecnt++;
else
signaturecnt = 0;
- if (4 == signaturecnt) {
+ if (signaturecnt == 4) {
dprintk(1, "Mailbox signature found\n");
- return i+1;
+ return i + 1;
}
}
dprintk(0, "Mailbox signature values not found!\n");
@@ -431,10 +456,13 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
__le32 *dataptr;
retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
- retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
- retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
- retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
- msleep(1);
+ retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
+ retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH,
+ 0x80000640);
+ retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE,
+ 0x1A);
+ usleep_range(10000, 20000);
retval |= register_write(dev->core, IVTV_REG_APU, 0);
if (retval < 0)
@@ -443,29 +471,28 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
&dev->pci->dev);
-
if (retval != 0) {
pr_err("Hotplug firmware request failed (%s).\n",
- CX2341X_FIRM_ENC_FILENAME);
+ CX2341X_FIRM_ENC_FILENAME);
pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
return -EIO;
}
if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
pr_err("Firmware size mismatch (have %zd, expected %d)\n",
- firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
+ firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -EINVAL;
}
- if (0 != memcmp(firmware->data, magic, 8)) {
+ if (memcmp(firmware->data, magic, 8) != 0) {
pr_err("Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -EINVAL;
}
/* transfer to the chip */
- dprintk(1,"Loading firmware ...\n");
+ dprintk(1, "Loading firmware ...\n");
dataptr = (__le32 *)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
value = le32_to_cpu(*dataptr);
@@ -486,10 +513,11 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
}
dprintk(0, "Firmware upload successful.\n");
- retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
+ retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
retval |= register_read(dev->core, IVTV_REG_SPU, &value);
retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
- msleep(1);
+ usleep_range(10000, 20000);
retval |= register_read(dev->core, IVTV_REG_VPU, &value);
retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);
@@ -499,19 +527,19 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
return 0;
}
-/**
- Settings used by the windows tv app for PVR2000:
-=================================================================================================================
-Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
------------------------------------------------------------------------------------------------------------------
-MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
-=================================================================================================================
-*DB: "DirectBurn"
-*/
+/*
+ * Settings used by the windows tv app for PVR2000:
+ * =================================================================================================================
+ * Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
+ * -----------------------------------------------------------------------------------------------------------------
+ * MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
+ * =================================================================================================================
+ * [*] DB: "DirectBurn"
+ */
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
@@ -519,11 +547,12 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
/* assign frame size */
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
- core->height, core->width);
+ core->height, core->width);
dev->cxhdl.width = core->width;
dev->cxhdl.height = core->height;
- cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
+ cx2341x_handler_set_50hz(&dev->cxhdl,
+ dev->core->tvnorm & V4L2_STD_625_50);
cx2341x_handler_setup(&dev->cxhdl);
}
@@ -533,7 +562,7 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
int version;
int retval;
- dprintk(1,"Initialize codec\n");
+ dprintk(1, "Initialize codec\n");
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
/* ping was not successful, reset and upload firmware */
@@ -549,15 +578,18 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
dev->mailbox = retval;
- retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
+ /* ping */
+ retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
dprintk(0, "ERROR: Firmware ping failed!\n");
return -1;
}
- retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
+ retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION,
+ 0, 1, &version);
if (retval < 0) {
- dprintk(0, "ERROR: Firmware get encoder version failed!\n");
+ dprintk(0,
+ "ERROR: Firmware get encoder version failed!\n");
return -1;
}
dprintk(0, "Firmware version is 0x%08x\n", version);
@@ -571,13 +603,11 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
blackbird_codec_settings(dev);
blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
- BLACKBIRD_FIELD1_SAA7115,
- BLACKBIRD_FIELD2_SAA7115
- );
+ BLACKBIRD_FIELD1_SAA7115, BLACKBIRD_FIELD2_SAA7115);
blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
- BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
return 0;
}
@@ -615,9 +645,7 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
/* start capturing to the host interface */
blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
- BLACKBIRD_MPEG_CAPTURE,
- BLACKBIRD_RAW_BITS_NONE
- );
+ BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE);
return 0;
}
@@ -625,10 +653,9 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
static int blackbird_stop_codec(struct cx8802_dev *dev)
{
blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- BLACKBIRD_END_NOW,
- BLACKBIRD_MPEG_CAPTURE,
- BLACKBIRD_RAW_BITS_NONE
- );
+ BLACKBIRD_END_NOW,
+ BLACKBIRD_MPEG_CAPTURE,
+ BLACKBIRD_RAW_BITS_NONE);
cx2341x_handler_set_busy(&dev->cxhdl, 0);
@@ -638,8 +665,8 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -699,7 +726,8 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
err = drv->request_acquire(drv);
if (err != 0) {
- dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__, err);
+ dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__,
+ err);
goto fail;
}
@@ -770,7 +798,7 @@ static const struct vb2_ops blackbird_qops = {
/* ------------------------------------------------------------------ */
static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -781,8 +809,8 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
if (f->index != 0)
return -EINVAL;
@@ -794,7 +822,7 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -810,11 +838,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- unsigned maxw, maxh;
+ unsigned int maxw, maxh;
enum v4l2_field field;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -850,7 +878,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -864,20 +892,21 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
core->width = f->fmt.pix.width;
core->height = f->fmt.pix.height;
core->field = f->fmt.pix.field;
- cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+ cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
- f->fmt.pix.height, f->fmt.pix.width);
+ f->fmt.pix.height, f->fmt.pix.width);
return 0;
}
-static int vidioc_s_frequency (struct file *file, void *priv,
- const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
bool streaming;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -885,16 +914,15 @@ static int vidioc_s_frequency (struct file *file, void *priv,
if (streaming)
blackbird_stop_codec(dev);
- cx88_set_freq (core,f);
+ cx88_set_freq(core, f);
blackbird_initialize_codec(dev);
- cx88_set_scale(core, core->width, core->height,
- core->field);
+ cx88_set_scale(core, core->width, core->height, core->field);
if (streaming)
blackbird_start_codec(dev);
return 0;
}
-static int vidioc_log_status (struct file *file, void *priv)
+static int vidioc_log_status(struct file *file, void *priv)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -906,21 +934,22 @@ static int vidioc_log_status (struct file *file, void *priv)
return 0;
}
-static int vidioc_enum_input (struct file *file, void *priv,
- struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- return cx88_enum_input (core,i);
+
+ return cx88_enum_input(core, i);
}
-static int vidioc_g_frequency (struct file *file, void *priv,
- struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -931,7 +960,7 @@ static int vidioc_g_frequency (struct file *file, void *priv,
return 0;
}
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -940,31 +969,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
- if (0 == INPUT(i).type)
+ if (!INPUT(i).type)
return -EINVAL;
cx88_newstation(core);
- cx88_video_mux(core,i);
+ cx88_video_mux(core, i);
return 0;
}
-static int vidioc_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
u32 reg;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
strcpy(t->name, "Television");
@@ -972,21 +1001,21 @@ static int vidioc_g_tuner (struct file *file, void *priv,
t->rangehigh = 0xffffffffUL;
call_all(core, tuner, g_tuner, t);
- cx88_get_stereo(core ,t);
+ cx88_get_stereo(core, t);
reg = cx_read(MO_DEVICE_STATUS);
- t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+ t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
return 0;
}
-static int vidioc_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (UNSET == core->board.tuner_type)
+ if (core->board.tuner_type == UNSET)
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
cx88_set_stereo(core, t->audmode, 1);
@@ -1010,8 +1039,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
return cx88_set_tvnorm(core, id);
}
-static const struct v4l2_file_operations mpeg_fops =
-{
+static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -1050,7 +1078,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
static struct video_device cx8802_mpeg_template = {
.name = "cx8802",
.fops = &mpeg_fops,
- .ioctl_ops = &mpeg_ioctl_ops,
+ .ioctl_ops = &mpeg_ioctl_ops,
.tvnorms = CX88_NORMS,
};
@@ -1064,7 +1092,9 @@ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
- /* By default, core setup will leave the cx22702 out of reset, on the bus.
+ /*
+ * By default, core setup will leave the cx22702 out of reset,
+ * on the bus.
* We left the hardware on power up with the cx22702 active.
* We're being given access to re-arrange the GPIOs.
* Take the bus off the cx22702 and put the cx23416 on it.
@@ -1118,12 +1148,11 @@ static int blackbird_register_video(struct cx8802_dev *dev)
dev->mpeg_dev.queue = &dev->vb2_mpegq;
err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
if (err < 0) {
- printk(KERN_INFO "%s/2: can't register mpeg device\n",
- dev->core->name);
+ pr_info("can't register mpeg device\n");
return err;
}
- printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
- dev->core->name, video_device_node_name(&dev->mpeg_dev));
+ pr_info("registered device %s [mpeg]\n",
+ video_device_node_name(&dev->mpeg_dev));
return 0;
}
@@ -1136,8 +1165,8 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
struct vb2_queue *q;
int err;
- dprintk( 1, "%s\n", __func__);
- dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+ dprintk(1, "%s\n", __func__);
+ dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
@@ -1158,16 +1187,15 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL);
/* blackbird stuff */
- printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
- core->name);
+ pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
host_setup(dev->core);
blackbird_initialize_codec(dev);
/* initial device configuration: needed ? */
// init_controls(core);
- cx88_set_tvnorm(core,core->tvnorm);
- cx88_video_mux(core,0);
+ cx88_set_tvnorm(core, core->tvnorm);
+ cx88_video_mux(core, 0);
cx2341x_handler_set_50hz(&dev->cxhdl, core->height == 576);
cx2341x_handler_setup(&dev->cxhdl);
@@ -1219,8 +1247,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
static int __init blackbird_init(void)
{
- printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
- CX88_VERSION);
+ pr_info("cx2388x blackbird driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_blackbird_driver);
}
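One behavioural change buried in blackbird_load_firmware() above is the switch from msleep(1) to usleep_range(10000, 20000). For waits this short, msleep() is jiffies-based and routinely oversleeps, so a bounded hrtimer sleep is the usual replacement; the 10-20 ms window presumably matches what msleep(1) delivered in practice on low-HZ kernels. A minimal sketch with the same values:

#include <linux/delay.h>

/* Sketch only: bounded sleep used in place of msleep(1) for the
 * register/SDRAM setup delays in the hunks above.
 */
static void blackbird_settle_delay(void)
{
	usleep_range(10000, 20000);	/* sleep 10-20 ms */
}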
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556ec3971..cdfbde277b8b 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* card-specific stuff.
*
@@ -14,22 +13,18 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "tea5767.h"
+#include "xc4000.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include "cx88.h"
-#include "tea5767.h"
-#include "xc4000.h"
-
static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -38,32 +33,23 @@ module_param_array(tuner, int, NULL, 0444);
module_param_array(radio, int, NULL, 0444);
module_param_array(card, int, NULL, 0444);
-MODULE_PARM_DESC(tuner,"tuner type");
-MODULE_PARM_DESC(radio,"radio tuner type");
-MODULE_PARM_DESC(card,"card type");
+MODULE_PARM_DESC(tuner, "tuner type");
+MODULE_PARM_DESC(radio, "radio tuner type");
+MODULE_PARM_DESC(card, "card type");
static unsigned int latency = UNSET;
-module_param(latency,int,0444);
-MODULE_PARM_DESC(latency,"pci latency timer");
+module_param(latency, int, 0444);
+MODULE_PARM_DESC(latency, "pci latency timer");
static int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "Disable IR support");
-#define info_printk(core, fmt, arg...) \
- printk(KERN_INFO "%s: " fmt, core->name , ## arg)
-
-#define warn_printk(core, fmt, arg...) \
- printk(KERN_WARNING "%s: " fmt, core->name , ## arg)
-
-#define err_printk(core, fmt, arg...) \
- printk(KERN_ERR "%s: " fmt, core->name , ## arg)
-
-#define dprintk(level,fmt, arg...) do { \
+#define dprintk(level, fmt, arg...) do { \
if (cx88_core_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg); \
- } while(0)
-
+ printk(KERN_DEBUG pr_fmt("%s: core:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
/* board config info */
@@ -291,7 +277,6 @@ static const struct cx88_board cx88_boards[] = {
.gpio2 = 0x0035e700,
.gpio3 = 0x02000000,
}, {
-
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0035c700,
@@ -505,22 +490,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/*
- GPIO[0] resets DT3302 DTV receiver
- 0 - reset asserted
- 1 - normal operation
- GPIO[1] mutes analog audio output connector
- 0 - enable selected source
- 1 - mute
- GPIO[2] selects source for analog audio output connector
- 0 - analog audio input connector on tab
- 1 - analog DAC output from CX23881 chip
- GPIO[3] selects RF input connector on tuner module
- 0 - RF connector labeled CABLE
- 1 - RF connector labeled ANT
- GPIO[4] selects high RF for QAM256 mode
- 0 - normal RF
- 1 - high RF
- */
+ * GPIO[0] resets DT3302 DTV receiver
+ * 0 - reset asserted
+ * 1 - normal operation
+ * GPIO[1] mutes analog audio output connector
+ * 0 - enable selected source
+ * 1 - mute
+ * GPIO[2] selects source for analog audio output connector
+ * 0 - analog audio input connector on tab
+ * 1 - analog DAC output from CX23881 chip
+ * GPIO[3] selects RF input connector on tuner module
+ * 0 - RF connector labeled CABLE
+ * 1 - RF connector labeled ANT
+ * GPIO[4] selects high RF for QAM256 mode
+ * 0 - normal RF
+ * 1 - high RF
+ */
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -743,7 +728,10 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- /* Some variants use a tda9874 and so need the tvaudio module. */
+ /*
+ * Some variants use a tda9874 and so need the
+ * tvaudio module.
+ */
.audio_chip = CX88_AUDIO_TVAUDIO,
.input = { {
.type = CX88_VMUX_TELEVISION,
@@ -1209,8 +1197,10 @@ static const struct cx88_board cx88_boards[] = {
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_MCE200_DELUXE] = {
- /* FIXME: tested TV input only, disabled composite,
- svideo and radio until they can be tested also. */
+ /*
+ * FIXME: tested TV input only, disabled composite,
+ * svideo and radio until they can be tested also.
+ */
.name = "Kworld MCE 200 Deluxe",
.tuner_type = TUNER_TENA_9533_DI,
.radio_type = UNSET,
@@ -1721,16 +1711,24 @@ static const struct cx88_board cx88_boards[] = {
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
- .name = "PowerColor RA330", /* Long names may confuse LIRC. */
+ /* Long names may confuse LIRC. */
+ .name = "PowerColor RA330",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.input = { {
+ /*
+ * Due to the way the cx88 driver is written,
+ * there is no way to deactivate audio pass-
+ * through without this entry. Furthermore, if
+ * the TV mux entry is first, you get audio
+ * from the tuner on boot for a little while.
+ */
.type = CX88_VMUX_DEBUG,
- .vmux = 3, /* Due to the way the cx88 driver is written, */
- .gpio0 = 0x00ff, /* there is no way to deactivate audio pass- */
- .gpio1 = 0xf39d, /* through without this entry. Furthermore, if */
- .gpio3 = 0x0000, /* the TV mux entry is first, you get audio */
- }, { /* from the tuner on boot for a little while. */
+ .vmux = 3,
+ .gpio0 = 0x00ff,
+ .gpio1 = 0xf39d,
+ .gpio3 = 0x0000,
+ }, {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00ff,
@@ -1883,11 +1881,12 @@ static const struct cx88_board cx88_boards[] = {
.gpio2 = 0x0cf7,
},
},
- /* Both radio, analog and ATSC work with this board.
- However, for analog to work, s5h1409 gate should be open,
- otherwise, tuner-xc3028 won't be detected.
- A proper fix require using the newer i2c methods to add
- tuner-xc3028 without doing an i2c probe.
+ /*
+ * Radio, analog and ATSC all work with this board.
+ * However, for analog to work, the s5h1409 gate should be open,
+ * otherwise tuner-xc3028 won't be detected.
+ * A proper fix requires using the newer i2c methods to add
+ * tuner-xc3028 without doing an i2c probe.
*/
[CX88_BOARD_KWORLD_ATSC_120] = {
.name = "Kworld PlusTV HD PCI 120 (ATSC 120)",
@@ -2821,15 +2820,15 @@ static const struct cx88_subid cx88_subids[] = {
},
};
-/* ----------------------------------------------------------------------- */
-/* some leadtek specific stuff */
-
+/*
+ * some leadtek specific stuff
+ */
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
if (eeprom_data[4] != 0x7d ||
eeprom_data[5] != 0x10 ||
eeprom_data[7] != 0x66) {
- warn_printk(core, "Leadtek eeprom invalid.\n");
+ pr_warn("Leadtek eeprom invalid.\n");
return;
}
@@ -2847,9 +2846,8 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
break;
}
- info_printk(core, "Leadtek Winfast 2000XP Expert config: "
- "tuner=%d, eeprom[0]=0x%02x\n",
- core->board.tuner_type, eeprom_data[0]);
+ pr_info("Leadtek Winfast 2000XP Expert config: tuner=%d, eeprom[0]=0x%02x\n",
+ core->board.tuner_type, eeprom_data[0]);
}
static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2863,8 +2861,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
core->model = tv.model;
/* Make sure we support the board model */
- switch (tv.model)
- {
+ switch (tv.model) {
case 14009: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in) */
case 14019: /* WinTV-HVR3000 (Retail, IR Blaster, b/panel video, 3.5mm audio in) */
case 14029: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge) */
@@ -2905,50 +2902,50 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
cx_set(MO_GP0_IO, 0x008989FF);
break;
default:
- warn_printk(core, "warning: unknown hauppauge model #%d\n",
- tv.model);
+ pr_warn("warning: unknown hauppauge model #%d\n", tv.model);
break;
}
- info_printk(core, "hauppauge eeprom: model=%d\n", tv.model);
+ pr_info("hauppauge eeprom: model=%d\n", tv.model);
}
-/* ----------------------------------------------------------------------- */
-/* some GDI (was: Modular Technology) specific stuff */
+/*
+ * some GDI (was: Modular Technology) specific stuff
+ */
static const struct {
int id;
int fm;
const char *name;
} gdi_tuner[] = {
- [ 0x01 ] = { .id = UNSET,
- .name = "NTSC_M" },
- [ 0x02 ] = { .id = UNSET,
- .name = "PAL_B" },
- [ 0x03 ] = { .id = UNSET,
- .name = "PAL_I" },
- [ 0x04 ] = { .id = UNSET,
- .name = "PAL_D" },
- [ 0x05 ] = { .id = UNSET,
- .name = "SECAM" },
-
- [ 0x10 ] = { .id = UNSET,
- .fm = 1,
- .name = "TEMIC_4049" },
- [ 0x11 ] = { .id = TUNER_TEMIC_4136FY5,
- .name = "TEMIC_4136" },
- [ 0x12 ] = { .id = UNSET,
- .name = "TEMIC_4146" },
-
- [ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME,
- .fm = 1,
- .name = "PHILIPS_FQ1216_MK3" },
- [ 0x21 ] = { .id = UNSET, .fm = 1,
- .name = "PHILIPS_FQ1236_MK3" },
- [ 0x22 ] = { .id = UNSET,
- .name = "PHILIPS_FI1236_MK3" },
- [ 0x23 ] = { .id = UNSET,
- .name = "PHILIPS_FI1216_MK3" },
+ [0x01] = { .id = UNSET,
+ .name = "NTSC_M" },
+ [0x02] = { .id = UNSET,
+ .name = "PAL_B" },
+ [0x03] = { .id = UNSET,
+ .name = "PAL_I" },
+ [0x04] = { .id = UNSET,
+ .name = "PAL_D" },
+ [0x05] = { .id = UNSET,
+ .name = "SECAM" },
+
+ [0x10] = { .id = UNSET,
+ .fm = 1,
+ .name = "TEMIC_4049" },
+ [0x11] = { .id = TUNER_TEMIC_4136FY5,
+ .name = "TEMIC_4136" },
+ [0x12] = { .id = UNSET,
+ .name = "TEMIC_4146" },
+
+ [0x20] = { .id = TUNER_PHILIPS_FQ1216ME,
+ .fm = 1,
+ .name = "PHILIPS_FQ1216_MK3" },
+ [0x21] = { .id = UNSET, .fm = 1,
+ .name = "PHILIPS_FQ1236_MK3" },
+ [0x22] = { .id = UNSET,
+ .name = "PHILIPS_FI1236_MK3" },
+ [0x23] = { .id = UNSET,
+ .name = "PHILIPS_FI1216_MK3" },
};
static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2956,16 +2953,17 @@ static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
const char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
? gdi_tuner[eeprom_data[0x0d]].name : NULL;
- info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
- if (NULL == name)
+ pr_info("GDI: tuner=%s\n", name ? name : "unknown");
+ if (!name)
return;
core->board.tuner_type = gdi_tuner[eeprom_data[0x0d]].id;
core->board.radio.type = gdi_tuner[eeprom_data[0x0d]].fm ?
CX88_RADIO : 0;
}
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff */
+/*
+ * some DViCO specific stuff
+ */
static int cx88_dvico_xc2028_callback(struct cx88_core *core,
int command, int arg)
{
@@ -2994,9 +2992,9 @@ static int cx88_dvico_xc2028_callback(struct cx88_core *core,
return 0;
}
-
-/* ----------------------------------------------------------------------- */
-/* some Geniatech specific stuff */
+/*
+ * some Geniatech specific stuff
+ */
static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
int command, int mode)
@@ -3059,8 +3057,9 @@ static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
return -EINVAL;
}
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff */
+/*
+ * some Divco specific stuff
+ */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
int command, int arg)
{
@@ -3079,8 +3078,9 @@ static int cx88_pv_8000gt_callback(struct cx88_core *core,
return 0;
}
-/* ----------------------------------------------------------------------- */
-/* some DViCO specific stuff */
+/*
+ * some DViCO specific stuff
+ */
static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
{
@@ -3107,8 +3107,8 @@ static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
msg.len = (i != 12 ? 5 : 2);
err = i2c_transfer(&core->i2c_adap, &msg, 1);
if (err != 1) {
- warn_printk(core, "dvico_fusionhdtv_hybrid_init buf %d "
- "failed (err = %d)!\n", i, err);
+ pr_warn("dvico_fusionhdtv_hybrid_init buf %d failed (err = %d)!\n",
+ i, err);
return;
}
}
@@ -3176,11 +3176,11 @@ static int cx88_xc4000_tuner_callback(struct cx88_core *core,
return -EINVAL;
}
-/* ----------------------------------------------------------------------- */
-/* Tuner callback function. Currently only needed for the Pinnacle *
- * PCTV HD 800i with an xc5000 sillicon tuner. This is used for both *
- * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c) */
-
+/*
+ * Tuner callback function. Currently only needed for the Pinnacle
+ * PCTV HD 800i with an xc5000 silicon tuner. This is used for both
+ * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c)
+ */
static int cx88_xc5000_tuner_callback(struct cx88_core *core,
int command, int arg)
{
@@ -3188,38 +3188,38 @@ static int cx88_xc5000_tuner_callback(struct cx88_core *core,
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
if (command == 0) { /* This is the reset command from xc5000 */
- /* djh - According to the engineer at PCTV Systems,
- the xc5000 reset pin is supposed to be on GPIO12.
- However, despite three nights of effort, pulling
- that GPIO low didn't reset the xc5000. While
- pulling MO_SRST_IO low does reset the xc5000, this
- also resets in the s5h1409 being reset as well.
- This causes tuning to always fail since the internal
- state of the s5h1409 does not match the driver's
- state. Given that the only two conditions in which
- the driver performs a reset is during firmware load
- and powering down the chip, I am taking out the
- reset. We know that the chip is being reset
- when the cx88 comes online, and not being able to
- do power management for this board is worse than
- not having any tuning at all. */
+ /*
+ * djh - According to the engineer at PCTV Systems,
+ * the xc5000 reset pin is supposed to be on GPIO12.
+ * However, despite three nights of effort, pulling
+ * that GPIO low didn't reset the xc5000. While
+ * pulling MO_SRST_IO low does reset the xc5000, this
+			 * also results in the s5h1409 being reset as well.
+ * This causes tuning to always fail since the internal
+ * state of the s5h1409 does not match the driver's
+ * state. Given that the only two conditions in which
+			 * the driver performs a reset are during firmware load
+ * and powering down the chip, I am taking out the
+ * reset. We know that the chip is being reset
+ * when the cx88 comes online, and not being able to
+ * do power management for this board is worse than
+ * not having any tuning at all.
+ */
return 0;
- } else {
- dprintk(1, "xc5000: unknown tuner callback command.\n");
- return -EINVAL;
}
- break;
+
+ dprintk(1, "xc5000: unknown tuner callback command.\n");
+ return -EINVAL;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
if (command == 0) { /* This is the reset command from xc5000 */
cx_clear(MO_GP0_IO, 0x00000010);
- msleep(10);
+ usleep_range(10000, 20000);
cx_set(MO_GP0_IO, 0x00000010);
return 0;
- } else {
- dprintk(1, "xc5000: unknown tuner callback command.\n");
- return -EINVAL;
}
- break;
+
+ dprintk(1, "xc5000: unknown tuner callback command.\n");
+ return -EINVAL;
}
return 0; /* Should never be here */
}
@@ -3230,14 +3230,14 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
struct cx88_core *core;
if (!i2c_algo) {
- printk(KERN_ERR "cx88: Error - i2c private data undefined.\n");
+ pr_err("Error - i2c private data undefined.\n");
return -EINVAL;
}
core = i2c_algo->data;
if (!core) {
- printk(KERN_ERR "cx88: Error - device struct undefined.\n");
+ pr_err("Error - device struct undefined.\n");
return -EINVAL;
}
@@ -3245,18 +3245,18 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
return -EINVAL;
switch (core->board.tuner_type) {
- case TUNER_XC2028:
- dprintk(1, "Calling XC2028/3028 callback\n");
- return cx88_xc2028_tuner_callback(core, command, arg);
- case TUNER_XC4000:
- dprintk(1, "Calling XC4000 callback\n");
- return cx88_xc4000_tuner_callback(core, command, arg);
- case TUNER_XC5000:
- dprintk(1, "Calling XC5000 callback\n");
- return cx88_xc5000_tuner_callback(core, command, arg);
+ case TUNER_XC2028:
+ dprintk(1, "Calling XC2028/3028 callback\n");
+ return cx88_xc2028_tuner_callback(core, command, arg);
+ case TUNER_XC4000:
+ dprintk(1, "Calling XC4000 callback\n");
+ return cx88_xc4000_tuner_callback(core, command, arg);
+ case TUNER_XC5000:
+ dprintk(1, "Calling XC5000 callback\n");
+ return cx88_xc5000_tuner_callback(core, command, arg);
}
- err_printk(core, "Error: Calling callback for tuner %d\n",
- core->board.tuner_type);
+ pr_err("Error: Calling callback for tuner %d\n",
+ core->board.tuner_type);
return -EINVAL;
}
EXPORT_SYMBOL(cx88_tuner_callback);
@@ -3267,28 +3267,20 @@ static void cx88_card_list(struct cx88_core *core, struct pci_dev *pci)
{
int i;
- if (0 == pci->subsystem_vendor &&
- 0 == pci->subsystem_device) {
- printk(KERN_ERR
- "%s: Your board has no valid PCI Subsystem ID and thus can't\n"
- "%s: be autodetected. Please pass card=<n> insmod option to\n"
- "%s: workaround that. Redirect complaints to the vendor of\n"
- "%s: the TV card. Best regards,\n"
- "%s: -- tux\n",
- core->name,core->name,core->name,core->name,core->name);
+ if (!pci->subsystem_vendor && !pci->subsystem_device) {
+ pr_err("Your board has no valid PCI Subsystem ID and thus can't\n");
+ pr_err("be autodetected. Please pass card=<n> insmod option to\n");
+ pr_err("workaround that. Redirect complaints to the vendor of\n");
+ pr_err("the TV card\n");
} else {
- printk(KERN_ERR
- "%s: Your board isn't known (yet) to the driver. You can\n"
- "%s: try to pick one of the existing card configs via\n"
- "%s: card=<n> insmod option. Updating to the latest\n"
- "%s: version might help as well.\n",
- core->name,core->name,core->name,core->name);
+ pr_err("Your board isn't known (yet) to the driver. You can\n");
+ pr_err("try to pick one of the existing card configs via\n");
+ pr_err("card=<n> insmod option. Updating to the latest\n");
+ pr_err("version might help as well.\n");
}
- err_printk(core, "Here is a list of valid choices for the card=<n> "
- "insmod option:\n");
+ pr_err("Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < ARRAY_SIZE(cx88_boards); i++)
- printk(KERN_ERR "%s: card=%d -> %s\n",
- core->name, i, cx88_boards[i].name);
+ pr_err(" card=%d -> %s\n", i, cx88_boards[i].name);
}
static void cx88_card_setup_pre_i2c(struct cx88_core *core)
@@ -3296,7 +3288,9 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/*
- * Bring the 702 demod up before i2c scanning/attach or devices are hidden
+ * Bring the 702 demod up before i2c scanning/attach or
+ * devices are hidden.
+ *
* We leave here with the 702 on the bus
*
* "reset the IR receiver on GPIO[3]"
@@ -3317,7 +3311,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_write(MO_GP2_IO, 0xef5);
mdelay(50);
cx_write(MO_GP2_IO, 0xcf7);
- msleep(10);
+ usleep_range(10000, 20000);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
@@ -3353,7 +3347,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_TWINHAN_VP1027_DVBS:
cx_write(MO_GP0_IO, 0x00003230);
cx_write(MO_GP0_IO, 0x00003210);
- msleep(1);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x00001230);
break;
}
@@ -3384,11 +3378,13 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
ctl->demod = XC3028_FE_OREN538;
break;
case CX88_BOARD_GENIATECH_X8000_MT:
- /* FIXME: For this board, the xc3028 never recovers after being
- powered down (the reset GPIO probably is not set properly).
- We don't have access to the hardware so we cannot determine
- which GPIO is used for xc3028, so just disable power xc3028
- power management for now */
+ /*
+ * FIXME: For this board, the xc3028 never recovers after being
+ * powered down (the reset GPIO probably is not set properly).
+ * We don't have access to the hardware so we cannot determine
+		 * which GPIO is used for xc3028, so just disable xc3028
+		 * power management for now.
+ */
ctl->disable_power_mgmt = 1;
break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
@@ -3418,7 +3414,7 @@ static void cx88_card_setup(struct cx88_core *core)
memset(&tun_setup, 0, sizeof(tun_setup));
- if (0 == core->i2c_rc) {
+ if (!core->i2c_rc) {
core->i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
}
@@ -3426,17 +3422,17 @@ static void cx88_card_setup(struct cx88_core *core)
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE:
case CX88_BOARD_HAUPPAUGE_ROSLYN:
- if (0 == core->i2c_rc)
- hauppauge_eeprom(core, eeprom+8);
+ if (!core->i2c_rc)
+ hauppauge_eeprom(core, eeprom + 8);
break;
case CX88_BOARD_GDI:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
case CX88_BOARD_LEADTEK_PVR2000:
case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
leadtek_eeprom(core, eeprom);
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
@@ -3449,7 +3445,7 @@ static void cx88_card_setup(struct cx88_core *core)
case CX88_BOARD_HAUPPAUGE_HVR4000:
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
case CX88_BOARD_HAUPPAUGE_IRONLY:
- if (0 == core->i2c_rc)
+ if (!core->i2c_rc)
hauppauge_eeprom(core, eeprom);
break;
case CX88_BOARD_KWORLD_DVBS_100:
@@ -3460,7 +3456,7 @@ static void cx88_card_setup(struct cx88_core *core)
/* GPIO0:0 is hooked to demod reset */
/* GPIO0:4 is hooked to xc3028 reset */
cx_write(MO_GP0_IO, 0x00111100);
- msleep(1);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x00111111);
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -3476,9 +3472,9 @@ static void cx88_card_setup(struct cx88_core *core)
/* GPIO0:0 is hooked to mt352 reset pin */
cx_set(MO_GP0_IO, 0x00000101);
cx_clear(MO_GP0_IO, 0x00000001);
- msleep(1);
+ usleep_range(10000, 20000);
cx_set(MO_GP0_IO, 0x00000101);
- if (0 == core->i2c_rc &&
+ if (!core->i2c_rc &&
core->boardnr == CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID)
dvico_fusionhdtv_hybrid_init(core);
break;
@@ -3487,7 +3483,7 @@ static void cx88_card_setup(struct cx88_core *core)
cx_set(MO_GP0_IO, 0x00000707);
cx_set(MO_GP2_IO, 0x00000101);
cx_clear(MO_GP2_IO, 0x00000001);
- msleep(1);
+ usleep_range(10000, 20000);
cx_clear(MO_GP0_IO, 0x00000007);
cx_set(MO_GP2_IO, 0x00000101);
break;
@@ -3495,23 +3491,23 @@ static void cx88_card_setup(struct cx88_core *core)
cx_write(MO_GP0_IO, 0x00080808);
break;
case CX88_BOARD_ATI_HDTVWONDER:
- if (0 == core->i2c_rc) {
+ if (!core->i2c_rc) {
/* enable tuner */
int i;
- static const u8 buffer [][2] = {
- {0x10,0x12},
- {0x13,0x04},
- {0x16,0x00},
- {0x14,0x04},
- {0x17,0x00}
+ static const u8 buffer[][2] = {
+ {0x10, 0x12},
+ {0x13, 0x04},
+ {0x16, 0x00},
+ {0x14, 0x04},
+ {0x17, 0x00}
};
core->i2c_client.addr = 0x0a;
for (i = 0; i < ARRAY_SIZE(buffer); i++)
- if (2 != i2c_master_send(&core->i2c_client,
- buffer[i],2))
- warn_printk(core, "Unable to enable "
- "tuner(%i).\n", i);
+ if (i2c_master_send(&core->i2c_client,
+ buffer[i], 2) != 2)
+ pr_warn("Unable to enable tuner(%i).\n",
+ i);
}
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
@@ -3545,7 +3541,7 @@ static void cx88_card_setup(struct cx88_core *core)
cx_write(MO_GP0_IO, 0x8000);
msleep(100);
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
cx_write(MO_GP0_IO, 0x8080);
msleep(100);
cx_write(MO_SRST_IO, 1);
@@ -3553,9 +3549,8 @@ static void cx88_card_setup(struct cx88_core *core)
break;
} /*end switch() */
-
/* Setup tuners */
- if ((core->board.radio_type != UNSET)) {
+ if (core->board.radio_type != UNSET) {
tun_setup.mode_mask = T_RADIO;
tun_setup.type = core->board.radio_type;
tun_setup.addr = core->board.radio_addr;
@@ -3610,35 +3605,30 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
/* check pci quirks */
if (pci_pci_problems & PCIPCI_TRITON) {
- printk(KERN_INFO "%s: quirk: PCIPCI_TRITON -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_TRITON -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_NATOMA) {
- printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_NATOMA -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_VIAETBF) {
- printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF -- set TBFX\n",
- name);
+ pr_info("quirk: PCIPCI_VIAETBF -- set TBFX\n");
ctrl |= CX88X_EN_TBFX;
}
if (pci_pci_problems & PCIPCI_VSFX) {
- printk(KERN_INFO "%s: quirk: PCIPCI_VSFX -- set VSFX\n",
- name);
+ pr_info("quirk: PCIPCI_VSFX -- set VSFX\n");
ctrl |= CX88X_EN_VSFX;
}
#ifdef PCIPCI_ALIMAGIK
if (pci_pci_problems & PCIPCI_ALIMAGIK) {
- printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
- name);
+ pr_info("quirk: PCIPCI_ALIMAGIK -- latency fixup\n");
lat = 0x0A;
}
#endif
/* check insmod options */
- if (UNSET != latency)
+ if (latency != UNSET)
lat = latency;
/* apply stuff */
@@ -3647,9 +3637,8 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
value |= ctrl;
pci_write_config_byte(pci, CX88X_DEVCTRL, value);
}
- if (UNSET != lat) {
- printk(KERN_INFO "%s: setting pci latency timer to %d\n",
- name, latency);
+ if (lat != UNSET) {
+ pr_info("setting pci latency timer to %d\n", latency);
pci_write_config_byte(pci, PCI_LATENCY_TIMER, latency);
}
return 0;
@@ -3657,27 +3646,28 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
int cx88_get_resources(const struct cx88_core *core, struct pci_dev *pci)
{
- if (request_mem_region(pci_resource_start(pci,0),
- pci_resource_len(pci,0),
+ if (request_mem_region(pci_resource_start(pci, 0),
+ pci_resource_len(pci, 0),
core->name))
return 0;
- printk(KERN_ERR
- "%s/%d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
- core->name, PCI_FUNC(pci->devfn),
+ pr_err("func %d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
+ PCI_FUNC(pci->devfn),
(unsigned long long)pci_resource_start(pci, 0),
pci->subsystem_vendor, pci->subsystem_device);
return -EBUSY;
}
-/* Allocate and initialize the cx88 core struct. One should hold the
- * devlist mutex before calling this. */
+/*
+ * Allocate and initialize the cx88 core struct. One should hold the
+ * devlist mutex before calling this.
+ */
struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
{
struct cx88_core *core;
int i;
core = kzalloc(sizeof(*core), GFP_KERNEL);
- if (core == NULL)
+ if (!core)
return NULL;
atomic_inc(&core->refcount);
@@ -3715,7 +3705,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
return NULL;
}
- if (0 != cx88_get_resources(core, pci)) {
+ if (cx88_get_resources(core, pci) != 0) {
v4l2_ctrl_handler_free(&core->video_hdl);
v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
@@ -3729,9 +3719,9 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
pci_resource_len(pci, 0));
core->bmmio = (u8 __iomem *)core->lmmio;
- if (core->lmmio == NULL) {
+ if (!core->lmmio) {
release_mem_region(pci_resource_start(pci, 0),
- pci_resource_len(pci, 0));
+ pci_resource_len(pci, 0));
v4l2_ctrl_handler_free(&core->video_hdl);
v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
@@ -3743,11 +3733,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->boardnr = UNSET;
if (card[core->nr] < ARRAY_SIZE(cx88_boards))
core->boardnr = card[core->nr];
- for (i = 0; UNSET == core->boardnr && i < ARRAY_SIZE(cx88_subids); i++)
+ for (i = 0; core->boardnr == UNSET && i < ARRAY_SIZE(cx88_subids); i++)
if (pci->subsystem_vendor == cx88_subids[i].subvendor &&
pci->subsystem_device == cx88_subids[i].subdevice)
core->boardnr = cx88_subids[i].card;
- if (UNSET == core->boardnr) {
+ if (core->boardnr == UNSET) {
core->boardnr = CX88_BOARD_UNKNOWN;
cx88_card_list(core, pci);
}
@@ -3757,7 +3747,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
core->board.num_frontends = 1;
- info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
pci->subsystem_vendor, pci->subsystem_device, core->board.name,
core->boardnr, card[core->nr] == core->boardnr ?
"insmod option" : "autodetected",
@@ -3777,10 +3767,12 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
cx88_i2c_init(core, pci);
/* load tuner module, if needed */
- if (UNSET != core->board.tuner_type) {
- /* Ignore 0x6b and 0x6f on cx88 boards.
+ if (core->board.tuner_type != UNSET) {
+ /*
+ * Ignore 0x6b and 0x6f on cx88 boards.
* FusionHDTV5 RT Gold has an ir receiver at 0x6b
- * and an RTC at 0x6f which can get corrupted if probed. */
+ * and an RTC at 0x6f which can get corrupted if probed.
+ */
static const unsigned short tv_addrs[] = {
0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
@@ -3789,24 +3781,27 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
};
int has_demod = (core->board.tda9887_conf & TDA9887_PRESENT);
- /* I don't trust the radio_type as is stored in the card
- definitions, so we just probe for it.
- The radio_type is sometimes missing, or set to UNSET but
- later code configures a tea5767.
+ /*
+		 * I don't trust the radio_type as it is stored in the card
+ * definitions, so we just probe for it.
+ * The radio_type is sometimes missing, or set to UNSET but
+ * later code configures a tea5767.
*/
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
+ "tuner", 0,
+ v4l2_i2c_tuner_addrs(ADDRS_RADIO));
if (has_demod)
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner",
+ &core->i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (core->board.tuner_addr == ADDR_UNSET) {
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner",
+ &core->i2c_adap, "tuner",
0, has_demod ? tv_addrs + 4 : tv_addrs);
} else {
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", core->board.tuner_addr, NULL);
+ "tuner", core->board.tuner_addr,
+ NULL);
}
}
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 46fe8c1eb9d4..973a9cd4c635 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* driver core
*
@@ -19,12 +18,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -38,7 +35,6 @@
#include <linux/videodev2.h>
#include <linux/mutex.h>
-#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -53,17 +49,22 @@ module_param_named(core_debug, cx88_core_debug, int, 0644);
MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
static unsigned int nicam;
-module_param(nicam,int,0644);
-MODULE_PARM_DESC(nicam,"tv audio is nicam");
+module_param(nicam, int, 0644);
+MODULE_PARM_DESC(nicam, "tv audio is nicam");
static unsigned int nocomb;
-module_param(nocomb,int,0644);
-MODULE_PARM_DESC(nocomb,"disable comb filter");
+module_param(nocomb, int, 0644);
+MODULE_PARM_DESC(nocomb, "disable comb filter");
+
+#define dprintk0(fmt, arg...) \
+ printk(KERN_DEBUG pr_fmt("%s: core:" fmt), \
+		__func__, ##arg)
-#define dprintk(level,fmt, arg...) do { \
- if (cx88_core_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg); \
- } while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (cx88_core_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: core:" fmt), \
+ __func__, ##arg); \
+} while (0)
static unsigned int cx88_devcount;
static LIST_HEAD(cx88_devlist);
@@ -71,15 +72,17 @@ static DEFINE_MUTEX(devlist);
#define NO_SYNC_LINE (-1U)
-/* @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
- generated _after_ lpi lines are transferred. */
-static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
- unsigned int offset, u32 sync_line,
- unsigned int bpl, unsigned int padding,
- unsigned int lines, unsigned int lpi, bool jump)
+/*
+ * @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
+ * generated _after_ lpi lines are transferred.
+ */
+static __le32 *cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
+ unsigned int offset, u32 sync_line,
+ unsigned int bpl, unsigned int padding,
+ unsigned int lines, unsigned int lpi, bool jump)
{
struct scatterlist *sg;
- unsigned int line,todo,sol;
+ unsigned int line, todo, sol;
if (jump) {
(*rp++) = cpu_to_le32(RISC_JUMP);
@@ -97,33 +100,34 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
- if (lpi && line>0 && !(line % lpi))
+ if (lpi && line > 0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
else
sol = RISC_SOL;
- if (bpl <= sg_dma_len(sg)-offset) {
+ if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
- *(rp++)=cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
- *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
- offset+=bpl;
+ *(rp++) = cpu_to_le32(RISC_WRITE | sol |
+ RISC_EOL | bpl);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
- *(rp++)=cpu_to_le32(RISC_WRITE|sol|
- (sg_dma_len(sg)-offset));
- *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
- todo -= (sg_dma_len(sg)-offset);
+ *(rp++) = cpu_to_le32(RISC_WRITE | sol |
+ (sg_dma_len(sg) - offset));
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
- *(rp++)=cpu_to_le32(RISC_WRITE|
- sg_dma_len(sg));
- *(rp++)=cpu_to_le32(sg_dma_address(sg));
+ *(rp++) = cpu_to_le32(RISC_WRITE |
+ sg_dma_len(sg));
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
- *(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
- *(rp++)=cpu_to_le32(sg_dma_address(sg));
+ *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
offset += todo;
}
offset += padding;
@@ -137,41 +141,46 @@ int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines)
{
- u32 instructions,fields;
+ u32 instructions, fields;
__le32 *rp;
fields = 0;
- if (UNSET != top_offset)
+ if (top_offset != UNSET)
fields++;
- if (UNSET != bottom_offset)
+ if (bottom_offset != UNSET)
fields++;
- /* estimate risc mem: worst case is one write per page border +
- one write per scan line + syncs + jump (all 2 dwords). Padding
- can cause next bpl to start close to a page border. First DMA
- region may be smaller than PAGE_SIZE */
- instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
+ /*
+ * estimate risc mem: worst case is one write per page border +
+ * one write per scan line + syncs + jump (all 2 dwords). Padding
+ * can cause next bpl to start close to a page border. First DMA
+ * region may be smaller than PAGE_SIZE
+ */
+ instructions = fields * (1 + ((bpl + padding) * lines) /
+ PAGE_SIZE + lines);
instructions += 4;
risc->size = instructions * 8;
risc->dma = 0;
risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
- if (NULL == risc->cpu)
+ if (!risc->cpu)
return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
- if (UNSET != top_offset)
+ if (top_offset != UNSET)
rp = cx88_risc_field(rp, sglist, top_offset, 0,
bpl, padding, lines, 0, true);
- if (UNSET != bottom_offset)
+ if (bottom_offset != UNSET)
rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
- bpl, padding, lines, 0, top_offset == UNSET);
+ bpl, padding, lines, 0,
+ top_offset == UNSET);
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx88_risc_buffer);
int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
@@ -180,32 +189,38 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
u32 instructions;
__le32 *rp;
- /* estimate risc mem: worst case is one write per page border +
- one write per scan line + syncs + jump (all 2 dwords). Here
- there is no padding and no sync. First DMA region may be smaller
- than PAGE_SIZE */
+ /*
+ * estimate risc mem: worst case is one write per page border +
+ * one write per scan line + syncs + jump (all 2 dwords). Here
+ * there is no padding and no sync. First DMA region may be smaller
+ * than PAGE_SIZE
+ */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 3;
risc->size = instructions * 8;
risc->dma = 0;
risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
- if (NULL == risc->cpu)
+ if (!risc->cpu)
return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
- rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, !lpi);
+ rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
+ lines, lpi, !lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx88_risc_databuffer);
-/* ------------------------------------------------------------------ */
-/* our SRAM memory layout */
+/*
+ * our SRAM memory layout
+ */
-/* we are going to put all thr risc programs into host memory, so we
+/*
+ * we are going to put all the risc programs into host memory, so we
* can use the whole SDRAM for the DMA fifos. To simplify things, we
* use a static memory layout. That surely will waste memory in case
* we don't use all DMA channels at the same time (which will be the
@@ -329,12 +344,13 @@ const struct sram_channel cx88_sram_channels[] = {
.cnt2_reg = MO_DMA27_CNT2,
},
};
+EXPORT_SYMBOL(cx88_sram_channels);
int cx88_sram_channel_setup(struct cx88_core *core,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
- unsigned int i,lines;
+ unsigned int i, lines;
u32 cdt;
bpl = (bpl + 7) & ~7; /* alignment */
@@ -342,16 +358,16 @@ int cx88_sram_channel_setup(struct cx88_core *core,
lines = ch->fifo_size / bpl;
if (lines > 6)
lines = 6;
- BUG_ON(lines < 2);
+ WARN_ON(lines < 2);
/* write CDT */
for (i = 0; i < lines; i++)
- cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
+ cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
/* write CMDS */
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, cdt);
- cx_write(ch->cmds_start + 8, (lines*16) >> 3);
+ cx_write(ch->cmds_start + 8, (lines * 16) >> 3);
cx_write(ch->cmds_start + 12, ch->ctrl_start);
cx_write(ch->cmds_start + 16, 64 >> 2);
for (i = 20; i < 64; i += 4)
@@ -360,12 +376,13 @@ int cx88_sram_channel_setup(struct cx88_core *core,
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt1_reg, (bpl >> 3) -1);
- cx_write(ch->cnt2_reg, (lines*16) >> 3);
+ cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
+ cx_write(ch->cnt2_reg, (lines * 16) >> 3);
- dprintk(2,"sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
+ dprintk(2, "sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
return 0;
}
+EXPORT_SYMBOL(cx88_sram_channel_setup);
/* ------------------------------------------------------------------ */
/* debug helper code */
@@ -373,23 +390,23 @@ int cx88_sram_channel_setup(struct cx88_core *core,
static int cx88_risc_decode(u32 risc)
{
static const char * const instr[16] = {
- [ RISC_SYNC >> 28 ] = "sync",
- [ RISC_WRITE >> 28 ] = "write",
- [ RISC_WRITEC >> 28 ] = "writec",
- [ RISC_READ >> 28 ] = "read",
- [ RISC_READC >> 28 ] = "readc",
- [ RISC_JUMP >> 28 ] = "jump",
- [ RISC_SKIP >> 28 ] = "skip",
- [ RISC_WRITERM >> 28 ] = "writerm",
- [ RISC_WRITECM >> 28 ] = "writecm",
- [ RISC_WRITECR >> 28 ] = "writecr",
+ [RISC_SYNC >> 28] = "sync",
+ [RISC_WRITE >> 28] = "write",
+ [RISC_WRITEC >> 28] = "writec",
+ [RISC_READ >> 28] = "read",
+ [RISC_READC >> 28] = "readc",
+ [RISC_JUMP >> 28] = "jump",
+ [RISC_SKIP >> 28] = "skip",
+ [RISC_WRITERM >> 28] = "writerm",
+ [RISC_WRITECM >> 28] = "writecm",
+ [RISC_WRITECR >> 28] = "writecr",
};
static int const incr[16] = {
- [ RISC_WRITE >> 28 ] = 2,
- [ RISC_JUMP >> 28 ] = 2,
- [ RISC_WRITERM >> 28 ] = 3,
- [ RISC_WRITECM >> 28 ] = 3,
- [ RISC_WRITECR >> 28 ] = 4,
+ [RISC_WRITE >> 28] = 2,
+ [RISC_JUMP >> 28] = 2,
+ [RISC_WRITERM >> 28] = 3,
+ [RISC_WRITECM >> 28] = 3,
+ [RISC_WRITECR >> 28] = 4,
};
static const char * const bits[] = {
"12", "13", "14", "resync",
@@ -399,16 +416,15 @@ static int cx88_risc_decode(u32 risc)
};
int i;
- printk("0x%08x [ %s", risc,
- instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
- for (i = ARRAY_SIZE(bits)-1; i >= 0; i--)
+ dprintk0("0x%08x [ %s", risc,
+ instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
+ for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
if (risc & (1 << (i + 12)))
- printk(" %s",bits[i]);
- printk(" count=%d ]\n", risc & 0xfff);
+ pr_cont(" %s", bits[i]);
+ pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
-
void cx88_sram_channel_dump(struct cx88_core *core,
const struct sram_channel *ch)
{
@@ -426,46 +442,41 @@ void cx88_sram_channel_dump(struct cx88_core *core,
"line / byte",
};
u32 risc;
- unsigned int i,j,n;
+ unsigned int i, j, n;
- printk("%s: %s - dma channel status dump\n",
- core->name,ch->name);
+ dprintk0("%s - dma channel status dump\n", ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
- printk("%s: cmds: %-12s: 0x%08x\n",
- core->name,name[i],
- cx_read(ch->cmds_start + 4*i));
+ dprintk0(" cmds: %-12s: 0x%08x\n",
+ name[i], cx_read(ch->cmds_start + 4 * i));
for (n = 1, i = 0; i < 4; i++) {
- risc = cx_read(ch->cmds_start + 4 * (i+11));
- printk("%s: risc%d: ", core->name, i);
+ risc = cx_read(ch->cmds_start + 4 * (i + 11));
+ pr_cont(" risc%d: ", i);
if (--n)
- printk("0x%08x [ arg #%d ]\n", risc, n);
+ pr_cont("0x%08x [ arg #%d ]\n", risc, n);
else
n = cx88_risc_decode(risc);
}
for (i = 0; i < 16; i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
- printk("%s: iq %x: ", core->name, i);
+ dprintk0(" iq %x: ", i);
n = cx88_risc_decode(risc);
for (j = 1; j < n; j++) {
- risc = cx_read(ch->ctrl_start + 4 * (i+j));
- printk("%s: iq %x: 0x%08x [ arg #%d ]\n",
- core->name, i+j, risc, j);
+ risc = cx_read(ch->ctrl_start + 4 * (i + j));
+ pr_cont(" iq %x: 0x%08x [ arg #%d ]\n",
+ i + j, risc, j);
}
}
- printk("%s: fifo: 0x%08x -> 0x%x\n",
- core->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
- printk("%s: ctrl: 0x%08x -> 0x%x\n",
- core->name, ch->ctrl_start, ch->ctrl_start+6*16);
- printk("%s: ptr1_reg: 0x%08x\n",
- core->name,cx_read(ch->ptr1_reg));
- printk("%s: ptr2_reg: 0x%08x\n",
- core->name,cx_read(ch->ptr2_reg));
- printk("%s: cnt1_reg: 0x%08x\n",
- core->name,cx_read(ch->cnt1_reg));
- printk("%s: cnt2_reg: 0x%08x\n",
- core->name,cx_read(ch->cnt2_reg));
+ dprintk0("fifo: 0x%08x -> 0x%x\n",
+ ch->fifo_start, ch->fifo_start + ch->fifo_size);
+ dprintk0("ctrl: 0x%08x -> 0x%x\n",
+ ch->ctrl_start, ch->ctrl_start + 6 * 16);
+ dprintk0(" ptr1_reg: 0x%08x\n", cx_read(ch->ptr1_reg));
+ dprintk0(" ptr2_reg: 0x%08x\n", cx_read(ch->ptr2_reg));
+ dprintk0(" cnt1_reg: 0x%08x\n", cx_read(ch->cnt1_reg));
+ dprintk0(" cnt2_reg: 0x%08x\n", cx_read(ch->cnt2_reg));
}
+EXPORT_SYMBOL(cx88_sram_channel_dump);
static const char *cx88_pci_irqs[32] = {
"vid", "aud", "ts", "vip", "hst", "5", "6", "tm1",
@@ -474,25 +485,26 @@ static const char *cx88_pci_irqs[32] = {
"i2c", "i2c_rack", "ir_smp", "gpio0", "gpio1"
};
-void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
+void cx88_print_irqbits(const char *tag, const char *strings[],
int len, u32 bits, u32 mask)
{
unsigned int i;
- printk(KERN_DEBUG "%s: %s [0x%x]", name, tag, bits);
+ dprintk0("%s [0x%x]", tag, bits);
for (i = 0; i < len; i++) {
if (!(bits & (1 << i)))
continue;
if (strings[i])
- printk(" %s", strings[i]);
+ pr_cont(" %s", strings[i]);
else
- printk(" %d", i);
+ pr_cont(" %d", i);
if (!(mask & (1 << i)))
continue;
- printk("*");
+ pr_cont("*");
}
- printk("\n");
+ pr_cont("\n");
}
+EXPORT_SYMBOL(cx88_print_irqbits);
/* ------------------------------------------------------------------ */
@@ -505,11 +517,12 @@ int cx88_core_irq(struct cx88_core *core, u32 status)
handled++;
}
if (!handled)
- cx88_print_irqbits(core->name, "irq pci",
+ cx88_print_irqbits("irq pci",
cx88_pci_irqs, ARRAY_SIZE(cx88_pci_irqs),
status, core->pci_irqmask);
return handled;
}
+EXPORT_SYMBOL(cx88_core_irq);
void cx88_wakeup(struct cx88_core *core,
struct cx88_dmaqueue *q, u32 count)
@@ -524,6 +537,7 @@ void cx88_wakeup(struct cx88_core *core,
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
+EXPORT_SYMBOL(cx88_wakeup);
void cx88_shutdown(struct cx88_core *core)
{
@@ -548,10 +562,11 @@ void cx88_shutdown(struct cx88_core *core)
/* stop capturing */
cx_write(VID_CAPTURE_CONTROL, 0);
}
+EXPORT_SYMBOL(cx88_shutdown);
int cx88_reset(struct cx88_core *core)
{
- dprintk(1,"%s\n",__func__);
+ dprintk(1, "");
cx88_shutdown(core);
/* clear irq status */
@@ -563,13 +578,15 @@ int cx88_reset(struct cx88_core *core)
msleep(100);
/* init sram */
- cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], 720*4, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
+ 720 * 4, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH22], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH23], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH24], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0);
- cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0);
+ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
+ 188 * 4, 0);
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27], 128, 0);
/* misc init ... */
@@ -597,11 +614,12 @@ int cx88_reset(struct cx88_core *core)
/* Reset on-board parts */
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
cx_write(MO_SRST_IO, 1);
return 0;
}
+EXPORT_SYMBOL(cx88_reset);
/* ------------------------------------------------------------------ */
@@ -631,10 +649,11 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
if (norm & V4L2_STD_NTSC) // All NTSC/M and variants
return 28636360; // 3.57954545 MHz +/- 10 Hz
- /* SECAM have also different sub carrier for chroma,
- but step_db and step_dr, at cx88_set_tvnorm already handles that.
-
- The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
+ /*
+	 * SECAM also has a different sub carrier for chroma,
+	 * but step_db and step_dr at cx88_set_tvnorm already handle that.
+ *
+ * The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
*/
return 35468950; // 4.43361875 MHz +/- 5 Hz
@@ -642,13 +661,12 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
static inline unsigned int norm_htotal(v4l2_std_id norm)
{
-
- unsigned int fsc4=norm_fsc8(norm)/2;
+ unsigned int fsc4 = norm_fsc8(norm) / 2;
/* returns 4*FSC / vtotal / frames per seconds */
return (norm & V4L2_STD_625_50) ?
- ((fsc4+312)/625+12)/25 :
- ((fsc4+262)/525*1001+15000)/30000;
+ ((fsc4 + 312) / 625 + 12) / 25 :
+ ((fsc4 + 262) / 525 * 1001 + 15000) / 30000;
}
static inline unsigned int norm_vbipack(v4l2_std_id norm)
@@ -656,14 +674,14 @@ static inline unsigned int norm_vbipack(v4l2_std_id norm)
return (norm & V4L2_STD_625_50) ? 511 : 400;
}
-int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int height,
- enum v4l2_field field)
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+ unsigned int height, enum v4l2_field field)
{
unsigned int swidth = norm_swidth(core->tvnorm);
unsigned int sheight = norm_maxh(core->tvnorm);
u32 value;
- dprintk(1,"set_scale: %dx%d [%s%s,%s]\n", width, height,
+ dprintk(1, "set_scale: %dx%d [%s%s,%s]\n", width, height,
V4L2_FIELD_HAS_TOP(field) ? "T" : "",
V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
v4l2_norm_to_name(core->tvnorm));
@@ -675,30 +693,30 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
value &= 0x3fe;
cx_write(MO_HDELAY_EVEN, value);
cx_write(MO_HDELAY_ODD, value);
- dprintk(1,"set_scale: hdelay 0x%04x (width %d)\n", value,swidth);
+ dprintk(1, "set_scale: hdelay 0x%04x (width %d)\n", value, swidth);
value = (swidth * 4096 / width) - 4096;
cx_write(MO_HSCALE_EVEN, value);
cx_write(MO_HSCALE_ODD, value);
- dprintk(1,"set_scale: hscale 0x%04x\n", value);
+ dprintk(1, "set_scale: hscale 0x%04x\n", value);
cx_write(MO_HACTIVE_EVEN, width);
cx_write(MO_HACTIVE_ODD, width);
- dprintk(1,"set_scale: hactive 0x%04x\n", width);
+ dprintk(1, "set_scale: hactive 0x%04x\n", width);
// recalc V scale Register (delay is constant)
cx_write(MO_VDELAY_EVEN, norm_vdelay(core->tvnorm));
cx_write(MO_VDELAY_ODD, norm_vdelay(core->tvnorm));
- dprintk(1,"set_scale: vdelay 0x%04x\n", norm_vdelay(core->tvnorm));
+ dprintk(1, "set_scale: vdelay 0x%04x\n", norm_vdelay(core->tvnorm));
value = (0x10000 - (sheight * 512 / height - 512)) & 0x1fff;
cx_write(MO_VSCALE_EVEN, value);
cx_write(MO_VSCALE_ODD, value);
- dprintk(1,"set_scale: vscale 0x%04x\n", value);
+ dprintk(1, "set_scale: vscale 0x%04x\n", value);
cx_write(MO_VACTIVE_EVEN, sheight);
cx_write(MO_VACTIVE_ODD, sheight);
- dprintk(1,"set_scale: vactive 0x%04x\n", sheight);
+ dprintk(1, "set_scale: vactive 0x%04x\n", sheight);
// setup filters
value = 0;
@@ -709,7 +727,7 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
}
if (INPUT(core->input).type == CX88_VMUX_SVIDEO)
value |= (1 << 13) | (1 << 5);
- if (V4L2_FIELD_INTERLACED == field)
+ if (field == V4L2_FIELD_INTERLACED)
value |= (1 << 3); // VINT (interlaced vertical scaling)
if (width < 385)
value |= (1 << 0); // 3-tap interpolation
@@ -720,10 +738,11 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
cx_andor(MO_FILTER_EVEN, 0x7ffc7f, value); /* preserve PEAKEN, PSEL */
cx_andor(MO_FILTER_ODD, 0x7ffc7f, value);
- dprintk(1,"set_scale: filter 0x%04x\n", value);
+ dprintk(1, "set_scale: filter 0x%04x\n", value);
return 0;
}
+EXPORT_SYMBOL(cx88_set_scale);
static const u32 xtal = 28636363;
@@ -740,36 +759,36 @@ static int set_pll(struct cx88_core *core, int prescale, u32 ofreq)
prescale = 5;
pll = ofreq * 8 * prescale * (u64)(1 << 20);
- do_div(pll,xtal);
+ do_div(pll, xtal);
reg = (pll & 0x3ffffff) | (pre[prescale] << 26);
if (((reg >> 20) & 0x3f) < 14) {
- printk("%s/0: pll out of range\n",core->name);
+ pr_err("pll out of range\n");
return -1;
}
- dprintk(1,"set_pll: MO_PLL_REG 0x%08x [old=0x%08x,freq=%d]\n",
+ dprintk(1, "set_pll: MO_PLL_REG 0x%08x [old=0x%08x,freq=%d]\n",
reg, cx_read(MO_PLL_REG), ofreq);
cx_write(MO_PLL_REG, reg);
for (i = 0; i < 100; i++) {
reg = cx_read(MO_DEVICE_STATUS);
- if (reg & (1<<2)) {
- dprintk(1,"pll locked [pre=%d,ofreq=%d]\n",
- prescale,ofreq);
+ if (reg & (1 << 2)) {
+ dprintk(1, "pll locked [pre=%d,ofreq=%d]\n",
+ prescale, ofreq);
return 0;
}
- dprintk(1,"pll not locked yet, waiting ...\n");
- msleep(10);
+ dprintk(1, "pll not locked yet, waiting ...\n");
+ usleep_range(10000, 20000);
}
- dprintk(1,"pll NOT locked [pre=%d,ofreq=%d]\n",prescale,ofreq);
+ dprintk(1, "pll NOT locked [pre=%d,ofreq=%d]\n", prescale, ofreq);
return -1;
}
int cx88_start_audio_dma(struct cx88_core *core)
{
/* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */
- int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4;
+ int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
- int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size/AUD_RDS_LINES;
+ int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size / AUD_RDS_LINES;
/* If downstream RISC is enabled, bail out; ALSA is managing DMA */
if (cx_read(MO_AUD_DMACNTRL) & 0x10)
@@ -806,8 +825,8 @@ static int set_tvaudio(struct cx88_core *core)
{
v4l2_std_id norm = core->tvnorm;
- if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
- CX88_VMUX_CABLE != INPUT(core->input).type)
+ if (INPUT(core->input).type != CX88_VMUX_TELEVISION &&
+ INPUT(core->input).type != CX88_VMUX_CABLE)
return 0;
if (V4L2_STD_PAL_BG & norm) {
@@ -822,7 +841,8 @@ static int set_tvaudio(struct cx88_core *core)
} else if (V4L2_STD_SECAM_L & norm) {
core->tvaudio = WW_L;
- } else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) & norm) {
+ } else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) &
+ norm) {
core->tvaudio = WW_BG;
} else if (V4L2_STD_SECAM_DK & norm) {
@@ -836,8 +856,8 @@ static int set_tvaudio(struct cx88_core *core)
core->tvaudio = WW_EIAJ;
} else {
- printk("%s/0: tvaudio support needs work for this tv norm [%s], sorry\n",
- core->name, v4l2_norm_to_name(core->tvnorm));
+ pr_info("tvaudio support needs work for this tv norm [%s], sorry\n",
+ v4l2_norm_to_name(core->tvnorm));
core->tvaudio = WW_NONE;
return 0;
}
@@ -847,23 +867,21 @@ static int set_tvaudio(struct cx88_core *core)
/* cx88_set_stereo(dev,V4L2_TUNER_MODE_STEREO); */
/*
- This should be needed only on cx88-alsa. It seems that some cx88 chips have
- bugs and does require DMA enabled for it to work.
+ * This should be needed only on cx88-alsa. It seems that some cx88 chips have
+	 * bugs and do require DMA enabled for it to work.
*/
cx88_start_audio_dma(core);
return 0;
}
-
-
int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
{
u32 fsc8;
u32 adc_clock;
u32 vdec_clock;
- u32 step_db,step_dr;
+ u32 step_db, step_dr;
u64 tmp64;
- u32 bdelay,agcdelay,htotal;
+ u32 bdelay, agcdelay, htotal;
u32 cxiformat, cxoformat;
if (norm == core->tvnorm)
@@ -912,62 +930,67 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
cxoformat = 0x181f0008;
}
- dprintk(1,"set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
+ dprintk(1, "set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
v4l2_norm_to_name(core->tvnorm), fsc8, adc_clock, vdec_clock,
step_db, step_dr);
- set_pll(core,2,vdec_clock);
+ set_pll(core, 2, vdec_clock);
- dprintk(1,"set_tvnorm: MO_INPUT_FORMAT 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_INPUT_FORMAT 0x%08x [old=0x%08x]\n",
cxiformat, cx_read(MO_INPUT_FORMAT) & 0x0f);
- /* Chroma AGC must be disabled if SECAM is used, we enable it
- by default on PAL and NTSC */
+ /*
+ * Chroma AGC must be disabled if SECAM is used, we enable it
+ * by default on PAL and NTSC
+ */
cx_andor(MO_INPUT_FORMAT, 0x40f,
norm & V4L2_STD_SECAM ? cxiformat : cxiformat | 0x400);
// FIXME: as-is from DScaler
- dprintk(1,"set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
cxoformat, cx_read(MO_OUTPUT_FORMAT));
cx_write(MO_OUTPUT_FORMAT, cxoformat);
// MO_SCONV_REG = adc clock / video dec clock * 2^17
tmp64 = adc_clock * (u64)(1 << 17);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SCONV_REG 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SCONV_REG 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SCONV_REG));
cx_write(MO_SCONV_REG, (u32)tmp64);
// MO_SUB_STEP = 8 * fsc / video dec clock * 2^22
tmp64 = step_db * (u64)(1 << 22);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SUB_STEP 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SUB_STEP 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SUB_STEP));
cx_write(MO_SUB_STEP, (u32)tmp64);
// MO_SUB_STEP_DR = 8 * 4406250 / video dec clock * 2^22
tmp64 = step_dr * (u64)(1 << 22);
do_div(tmp64, vdec_clock);
- dprintk(1,"set_tvnorm: MO_SUB_STEP_DR 0x%08x [old=0x%08x]\n",
+ dprintk(1, "set_tvnorm: MO_SUB_STEP_DR 0x%08x [old=0x%08x]\n",
(u32)tmp64, cx_read(MO_SUB_STEP_DR));
cx_write(MO_SUB_STEP_DR, (u32)tmp64);
// bdelay + agcdelay
bdelay = vdec_clock * 65 / 20000000 + 21;
agcdelay = vdec_clock * 68 / 20000000 + 15;
- dprintk(1,"set_tvnorm: MO_AGC_BURST 0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
- (bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST), bdelay, agcdelay);
+ dprintk(1,
+ "set_tvnorm: MO_AGC_BURST 0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
+ (bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST),
+ bdelay, agcdelay);
cx_write(MO_AGC_BURST, (bdelay << 8) | agcdelay);
// htotal
tmp64 = norm_htotal(norm) * (u64)vdec_clock;
do_div(tmp64, fsc8);
htotal = (u32)tmp64;
- dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
+ dprintk(1,
+ "set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
htotal, cx_read(MO_HTOTAL), (u32)tmp64);
cx_andor(MO_HTOTAL, 0x07ff, htotal);
// vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
// the effective vbi offset ~244 samples, the same as the Bt8x8
- cx_write(MO_VBI_PACKET, (10<<11) | norm_vbipack(norm));
+ cx_write(MO_VBI_PACKET, (10 << 11) | norm_vbipack(norm));
// this is needed as well to set all tvnorm parameter
cx88_set_scale(core, 320, 240, V4L2_FIELD_INTERLACED);
@@ -978,12 +1001,16 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
// tell i2c chips
call_all(core, video, s_std, norm);
- /* The chroma_agc control should be inaccessible if the video format is SECAM */
+ /*
+ * The chroma_agc control should be inaccessible
+ * if the video format is SECAM
+ */
v4l2_ctrl_grab(core->chroma_agc, cxiformat == VideoFormatSECAM);
// done
return 0;
}
+EXPORT_SYMBOL(cx88_set_tvnorm);
/* ------------------------------------------------------------------ */
@@ -1008,8 +1035,9 @@ void cx88_vdev_init(struct cx88_core *core,
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
core->name, type, core->board.name);
}
+EXPORT_SYMBOL(cx88_vdev_init);
-struct cx88_core* cx88_core_get(struct pci_dev *pci)
+struct cx88_core *cx88_core_get(struct pci_dev *pci)
{
struct cx88_core *core;
@@ -1020,7 +1048,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
if (PCI_SLOT(pci->devfn) != core->pci_slot)
continue;
- if (0 != cx88_get_resources(core, pci)) {
+ if (cx88_get_resources(core, pci) != 0) {
mutex_unlock(&devlist);
return NULL;
}
@@ -1030,7 +1058,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
}
core = cx88_core_create(pci, cx88_devcount);
- if (NULL != core) {
+ if (core) {
cx88_devcount++;
list_add_tail(&core->devlist, &cx88_devlist);
}
@@ -1038,18 +1066,19 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
mutex_unlock(&devlist);
return core;
}
+EXPORT_SYMBOL(cx88_core_get);
void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
{
- release_mem_region(pci_resource_start(pci,0),
- pci_resource_len(pci,0));
+ release_mem_region(pci_resource_start(pci, 0),
+ pci_resource_len(pci, 0));
if (!atomic_dec_and_test(&core->refcount))
return;
mutex_lock(&devlist);
cx88_ir_fini(core);
- if (0 == core->i2c_rc) {
+ if (core->i2c_rc == 0) {
if (core->i2c_rtc)
i2c_unregister_device(core->i2c_rtc);
i2c_del_adapter(&core->i2c_adap);
@@ -1063,29 +1092,4 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
v4l2_device_unregister(&core->v4l2_dev);
kfree(core);
}
-
-/* ------------------------------------------------------------------ */
-
-EXPORT_SYMBOL(cx88_print_irqbits);
-
-EXPORT_SYMBOL(cx88_core_irq);
-EXPORT_SYMBOL(cx88_wakeup);
-EXPORT_SYMBOL(cx88_reset);
-EXPORT_SYMBOL(cx88_shutdown);
-
-EXPORT_SYMBOL(cx88_risc_buffer);
-EXPORT_SYMBOL(cx88_risc_databuffer);
-
-EXPORT_SYMBOL(cx88_sram_channels);
-EXPORT_SYMBOL(cx88_sram_channel_setup);
-EXPORT_SYMBOL(cx88_sram_channel_dump);
-
-EXPORT_SYMBOL(cx88_set_tvnorm);
-EXPORT_SYMBOL(cx88_set_scale);
-
-EXPORT_SYMBOL(cx88_vdev_init);
-EXPORT_SYMBOL(cx88_core_get);
EXPORT_SYMBOL(cx88_core_put);
-
-EXPORT_SYMBOL(cx88_ir_start);
-EXPORT_SYMBOL(cx88_ir_stop);
diff --git a/drivers/media/pci/cx88/cx88-dsp.c b/drivers/media/pci/cx88/cx88-dsp.c
index a9907265ff66..105029088120 100644
--- a/drivers/media/pci/cx88/cx88-dsp.c
+++ b/drivers/media/pci/cx88/cx88-dsp.c
@@ -1,5 +1,4 @@
/*
- *
* Stereo and SAP detection for cx88
*
* Copyright (c) 2009 Marton Balint <cus@fazekas.hu>
@@ -13,41 +12,41 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "cx88-reg.h"
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
-#include "cx88.h"
-#include "cx88-reg.h"
-
#define INT_PI ((s32)(3.141592653589 * 32768.0))
#define compat_remainder(a, b) \
- ((float)(((s32)((a)*100))%((s32)((b)*100)))/100.0)
+ ((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0)
#define baseband_freq(carrier, srate, tone) ((s32)( \
(compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
-/* We calculate the baseband frequencies of the carrier and the pilot tones
- * based on the the sampling rate of the audio rds fifo. */
+/*
+ * We calculate the baseband frequencies of the carrier and the pilot tones
+ * based on the sampling rate of the audio rds fifo.
+ */
#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
#define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1)
#define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5)
-/* The frequencies below are from the reference driver. They probably need
+/*
+ * The frequencies below are from the reference driver. They probably need
* further adjustments, because they are not tested at all. You may even need
* to play a bit with the registers of the chip to select the proper signal
* for the input of the audio rds fifo, and measure it's sampling rate to
- * calculate the proper baseband frequencies... */
+ * calculate the proper baseband frequencies...
+ */
#define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0))
#define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0))
@@ -71,43 +70,52 @@ static unsigned int dsp_debug;
module_param(dsp_debug, int, 0644);
MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
-#define dprintk(level, fmt, arg...) if (dsp_debug >= level) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (dsp_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt), \
+ __func__, ##arg); \
+} while (0)
static s32 int_cos(u32 x)
{
u32 t2, t4, t6, t8;
s32 ret;
u16 period = x / INT_PI;
+
if (period % 2)
return -int_cos(x - INT_PI);
x = x % INT_PI;
- if (x > INT_PI/2)
- return -int_cos(INT_PI/2 - (x % (INT_PI/2)));
- /* Now x is between 0 and INT_PI/2.
- * To calculate cos(x) we use it's Taylor polinom. */
- t2 = x*x/32768/2;
- t4 = t2*x/32768*x/32768/3/4;
- t6 = t4*x/32768*x/32768/5/6;
- t8 = t6*x/32768*x/32768/7/8;
- ret = 32768-t2+t4-t6+t8;
+ if (x > INT_PI / 2)
+ return -int_cos(INT_PI / 2 - (x % (INT_PI / 2)));
+ /*
+ * Now x is between 0 and INT_PI/2.
+	 * To calculate cos(x) we use its Taylor polynomial.
+ */
+ t2 = x * x / 32768 / 2;
+ t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
+ t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
+ t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
+ ret = 32768 - t2 + t4 - t6 + t8;
return ret;
}
static u32 int_goertzel(s16 x[], u32 N, u32 freq)
{
- /* We use the Goertzel algorithm to determine the power of the
- * given frequency in the signal */
+ /*
+ * We use the Goertzel algorithm to determine the power of the
+ * given frequency in the signal
+ */
s32 s_prev = 0;
s32 s_prev2 = 0;
- s32 coeff = 2*int_cos(freq);
+ s32 coeff = 2 * int_cos(freq);
u32 i;
u64 tmp;
u32 divisor;
for (i = 0; i < N; i++) {
- s32 s = x[i] + ((s64)coeff*s_prev/32768) - s_prev2;
+ s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2;
+
s_prev2 = s_prev;
s_prev = s;
}
@@ -115,17 +123,20 @@ static u32 int_goertzel(s16 x[], u32 N, u32 freq)
tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
(s64)coeff * s_prev2 * s_prev / 32768;
- /* XXX: N must be low enough so that N*N fits in s32.
- * Else we need two divisions. */
+ /*
+ * XXX: N must be low enough so that N*N fits in s32.
+ * Else we need two divisions.
+ */
divisor = N * N;
do_div(tmp, divisor);
- return (u32) tmp;
+ return (u32)tmp;
}
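int_goertzel() is the standard Goertzel recurrence in fixed point: s[i] = x[i] + coeff*s[i-1] - s[i-2] with coeff = 2*cos(w), and the power taken from the last two states. A floating-point reference with a synthetic tone (function names and test values are made up) shows the expected behaviour, the probed bin dominating when it matches the tone:

#include <stdio.h>
#include <math.h>

#define TWO_PI 6.283185307179586

static double goertzel_power(const double *x, int n, double w)
{
	/* w is the probed frequency in radians per sample */
	double coeff = 2.0 * cos(w);
	double s_prev = 0.0, s_prev2 = 0.0;

	for (int i = 0; i < n; i++) {
		double s = x[i] + coeff * s_prev - s_prev2;

		s_prev2 = s_prev;
		s_prev = s;
	}
	return (s_prev2 * s_prev2 + s_prev * s_prev -
		coeff * s_prev2 * s_prev) / ((double)n * n);
}

int main(void)
{
	enum { N = 512 };
	double x[N];
	double w_tone = TWO_PI * 0.1;	/* tone at 1/10th of the sample rate */

	for (int i = 0; i < N; i++)
		x[i] = sin(w_tone * i);

	printf("power at the tone bin    : %f\n", goertzel_power(x, N, w_tone));
	printf("power at an off-tone bin : %f\n",
	       goertzel_power(x, N, TWO_PI * 0.23));
	return 0;
}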
static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
{
u32 sum = int_goertzel(x, N, freq);
+
return (u32)int_sqrt(sum);
}
@@ -138,7 +149,7 @@ static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
if (N > 192) {
/* The last 192 samples are enough for noise detection */
- x += (N-192);
+ x += (N - 192);
N = 192;
}
@@ -176,8 +187,8 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
dual_freq = FREQ_EIAJ_DUAL;
break;
default:
- printk(KERN_WARNING "%s/0: unsupported audio mode %d for %s\n",
- core->name, core->tvaudio, __func__);
+ pr_warn("unsupported audio mode %d for %s\n",
+ core->tvaudio, __func__);
return UNSET;
}
@@ -186,8 +197,9 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
dual = freq_magnitude(x, N, dual_freq);
noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
- dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, "
- "noise=%d\n", carrier, stereo, dual, noise);
+ dprintk(1,
+ "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n",
+ carrier, stereo, dual, noise);
if (stereo > dual)
ret = V4L2_TUNER_SUB_STEREO;
@@ -196,20 +208,22 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
if (core->tvaudio == WW_EIAJ) {
/* EIAJ checks may need adjustments */
- if ((carrier > max(stereo, dual)*2) &&
- (carrier < max(stereo, dual)*6) &&
+ if ((carrier > max(stereo, dual) * 2) &&
+ (carrier < max(stereo, dual) * 6) &&
(carrier > 20 && carrier < 200) &&
(max(stereo, dual) > min(stereo, dual))) {
- /* For EIAJ the carrier is always present,
- so we probably don't need noise detection */
+ /*
+ * For EIAJ the carrier is always present,
+ * so we probably don't need noise detection
+ */
return ret;
}
} else {
- if ((carrier > max(stereo, dual)*2) &&
- (carrier < max(stereo, dual)*8) &&
+ if ((carrier > max(stereo, dual) * 2) &&
+ (carrier < max(stereo, dual) * 8) &&
(carrier > 20 && carrier < 200) &&
(noise < 10) &&
- (max(stereo, dual) > min(stereo, dual)*2)) {
+ (max(stereo, dual) > min(stereo, dual) * 2)) {
return ret;
}
}
@@ -222,8 +236,9 @@ static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
- dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d"
- "\n", dual_ref, dual, sap_ref, sap);
+
+ dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n",
+ dual_ref, dual, sap_ref, sap);
/* FIXME: Currently not supported */
return UNSET;
}
@@ -234,36 +249,31 @@ static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
s16 *samples;
unsigned int i;
- unsigned int bpl = srch->fifo_size/AUD_RDS_LINES;
- unsigned int spl = bpl/4;
- unsigned int sample_count = spl*(AUD_RDS_LINES-1);
+ unsigned int bpl = srch->fifo_size / AUD_RDS_LINES;
+ unsigned int spl = bpl / 4;
+ unsigned int sample_count = spl * (AUD_RDS_LINES - 1);
u32 current_address = cx_read(srch->ptr1_reg);
u32 offset = (current_address - srch->fifo_start + bpl);
- dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), "
- "sample_count=%d, aud_intstat=%08x\n", current_address,
+ dprintk(1,
+ "read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n",
+ current_address,
current_address - srch->fifo_start, sample_count,
cx_read(MO_AUD_INTSTAT));
-
- samples = kmalloc(sizeof(s16)*sample_count, GFP_KERNEL);
+ samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL);
if (!samples)
return NULL;
*N = sample_count;
for (i = 0; i < sample_count; i++) {
- offset = offset % (AUD_RDS_LINES*bpl);
+ offset = offset % (AUD_RDS_LINES * bpl);
samples[i] = cx_read(srch->fifo_start + offset);
offset += 4;
}
- if (dsp_debug >= 2) {
- dprintk(2, "RDS samples dump: ");
- for (i = 0; i < sample_count; i++)
- printk("%hd ", samples[i]);
- printk(".\n");
- }
+ dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples);
return samples;
}
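kmalloc_array() is used above because an open-coded sizeof(s16) * sample_count can wrap on a 32-bit size_t and silently under-allocate. A host-side illustration of that wrap, with uint32_t standing in for a 32-bit size_t and a made-up hostile count:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sample_count = 0x80000001u;	/* absurd element count */
	uint32_t bytes = (uint32_t)sizeof(int16_t) * sample_count;

	/* the multiply wraps modulo 2^32 and asks for only 2 bytes */
	printf("%u samples -> naive allocation size: %u bytes\n",
	       sample_count, bytes);
	return 0;
}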
@@ -310,11 +320,11 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
kfree(samples);
- if (UNSET != ret)
+ if (ret != UNSET)
dprintk(1, "stereo/sap detection result:%s%s%s\n",
- (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
- (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
- (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
+ (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
+ (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
+ (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
return ret;
}
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ac2392d8887a..ddf90678df34 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1,5 +1,4 @@
/*
- *
* device driver for Conexant 2388x based TV cards
* MPEG Transport Stream (DVB) routines
*
@@ -15,12 +14,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+#include "dvb-pll.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -29,8 +27,6 @@
#include <linux/file.h>
#include <linux/suspend.h>
-#include "cx88.h"
-#include "dvb-pll.h"
#include <media/v4l2-common.h>
#include "mt352.h"
@@ -69,7 +65,7 @@ MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"enable debug messages [dvb]");
+MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
static unsigned int dvb_buf_tscnt = 32;
module_param(dvb_buf_tscnt, int, 0644);
@@ -77,14 +73,17 @@ MODULE_PARM_DESC(dvb_buf_tscnt, "DVB Buffer TS count [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: dvb:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -169,23 +168,23 @@ static const struct vb2_ops dvb_qops = {
/* ------------------------------------------------------------------ */
-static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
+static int cx88_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx8802_driver *drv = NULL;
int ret = 0;
int fe_id;
fe_id = vb2_dvb_find_frontend(&dev->frontends, fe);
if (!fe_id) {
- printk(KERN_ERR "%s() No frontend found\n", __func__);
+ pr_err("%s() No frontend found\n", __func__);
return -EINVAL;
}
mutex_lock(&dev->core->lock);
drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
if (drv) {
- if (acquire){
+ if (acquire) {
dev->frontends.active_fe_id = fe_id;
ret = drv->request_acquire(drv);
} else {
@@ -222,13 +221,13 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
/* ------------------------------------------------------------------ */
-static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
+static int dvico_fusionhdtv_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
- static const u8 reset [] = { RESET, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static const u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
- static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 clock_config[] = { CLOCK_CTL, 0x38, 0x39 };
+ static const u8 reset[] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg[] = { AGC_TARGET, 0x24, 0x20 };
+ static const u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -244,11 +243,11 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
- static const u8 reset [] = { RESET, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static const u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
- static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 clock_config[] = { CLOCK_CTL, 0x38, 0x38 };
+ static const u8 reset[] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 };
+ static const u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -263,12 +262,12 @@ static int dvico_dual_demod_init(struct dvb_frontend *fe)
return 0;
}
-static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { 0x89, 0x38, 0x39 };
- static const u8 reset [] = { 0x50, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config[] = { 0x89, 0x38, 0x39 };
+ static const u8 reset[] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+ static const u8 agc_cfg[] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -312,12 +311,12 @@ static struct mb86a16_config twinhan_vp1027 = {
};
#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
-static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend *fe)
{
- static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
- static const u8 reset [] = { 0x50, 0x80 };
- static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config[] = { 0x89, 0x38, 0x38 };
+ static const u8 reset[] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+ static const u8 agc_cfg[] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -374,9 +373,10 @@ static const struct cx22702_config hauppauge_hvr_config = {
.output_mode = CX22702_SERIAL_OUTPUT,
};
-static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int or51132_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
@@ -386,9 +386,9 @@ static const struct or51132_config pchdtv_hd3000 = {
.set_ts_params = or51132_set_ts_param,
};
-static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
+static int lgdt330x_pll_rf_set(struct dvb_frontend *fe, int index)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
dprintk(1, "%s: index = %d\n", __func__, index);
@@ -399,9 +399,10 @@ static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
return 0;
}
-static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int lgdt330x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
if (is_punctured)
dev->ts_gen_cntrl |= 0x04;
else
@@ -430,9 +431,10 @@ static const struct lgdt330x_config pchdtv_hd5500 = {
.set_ts_params = lgdt330x_set_ts_param,
};
-static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int nxt200x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
@@ -442,18 +444,19 @@ static const struct nxt200x_config ati_hdtvwonder = {
.set_ts_params = nxt200x_set_ts_param,
};
-static int cx24123_set_ts_param(struct dvb_frontend* fe,
- int is_punctured)
+static int cx24123_set_ts_param(struct dvb_frontend *fe,
+ int is_punctured)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0x02;
return 0;
}
-static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
+static int kworld_dvbs_100_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF)
@@ -469,11 +472,11 @@ static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF) {
- dprintk(1,"LNB Voltage OFF\n");
+ dprintk(1, "LNB Voltage OFF\n");
cx_write(MO_GP0_IO, 0x0000efff);
}
@@ -485,7 +488,7 @@ static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
cx_set(MO_GP0_IO, 0x6040);
@@ -625,9 +628,7 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
return -EINVAL;
if (!fe0->dvb.frontend) {
- printk(KERN_ERR "%s/2: dvb frontend not attached. "
- "Can't attach xc3028\n",
- dev->core->name);
+ pr_err("dvb frontend not attached. Can't attach xc3028\n");
return -EINVAL;
}
@@ -640,16 +641,14 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc3028 attach failed\n",
- dev->core->name);
+ pr_err("xc3028 attach failed\n");
dvb_frontend_detach(fe0->dvb.frontend);
dvb_unregister_frontend(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
- printk(KERN_INFO "%s/2: xc3028 attached\n",
- dev->core->name);
+ pr_info("xc3028 attached\n");
return 0;
}
@@ -665,41 +664,40 @@ static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
return -EINVAL;
if (!fe0->dvb.frontend) {
- printk(KERN_ERR "%s/2: dvb frontend not attached. "
- "Can't attach xc4000\n",
- dev->core->name);
+ pr_err("dvb frontend not attached. Can't attach xc4000\n");
return -EINVAL;
}
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc4000 attach failed\n",
- dev->core->name);
+ pr_err("xc4000 attach failed\n");
dvb_frontend_detach(fe0->dvb.frontend);
dvb_unregister_frontend(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
- printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
+ pr_info("xc4000 attached\n");
return 0;
}
static int cx24116_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0x2;
return 0;
}
static int stv0900_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 0;
return 0;
@@ -713,10 +711,10 @@ static int cx24116_reset_device(struct dvb_frontend *fe)
/* Reset the part */
/* Put the cx24116 into reset */
cx_write(MO_SRST_IO, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* Take the cx24116 out of reset */
cx_write(MO_SRST_IO, 1);
- msleep(10);
+ usleep_range(10000, 20000);
return 0;
}
@@ -734,9 +732,10 @@ static const struct cx24116_config tevii_s460_config = {
};
static int ds3000_set_ts_param(struct dvb_frontend *fe,
- int is_punctured)
+ int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
+
dev->ts_gen_cntrl = 4;
return 0;
@@ -800,12 +799,12 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
if (!core->board.num_frontends)
return -ENODEV;
- printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
- core->board.num_frontends);
+ pr_info("%s: allocating %d frontend(s)\n", __func__,
+ core->board.num_frontends);
for (i = 1; i <= core->board.num_frontends; i++) {
fe = vb2_dvb_alloc_frontend(&dev->frontends, i);
if (!fe) {
- printk(KERN_ERR "%s() failed to alloc\n", __func__);
+ pr_err("%s() failed to alloc\n", __func__);
vb2_dvb_dealloc_frontends(&dev->frontends);
return -ENOMEM;
}
@@ -813,8 +812,6 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
return 0;
}
-
-
static const u8 samsung_smt_7020_inittab[] = {
0x01, 0x15,
0x02, 0x00,
@@ -866,7 +863,6 @@ static const u8 samsung_smt_7020_inittab[] = {
0xff, 0xff,
};
-
static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -899,7 +895,7 @@ static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
}
static int samsung_smt_7020_set_tone(struct dvb_frontend *fe,
- enum fe_sec_tone_mode tone)
+ enum fe_sec_tone_mode tone)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
@@ -954,7 +950,7 @@ static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe,
}
static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
- u32 srate, u32 ratio)
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
@@ -988,7 +984,6 @@ static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
return 0;
}
-
static const struct stv0299_config samsung_stv0299_config = {
.demod_address = 0x68,
.inittab = samsung_smt_7020_inittab,
@@ -1008,8 +1003,8 @@ static int dvb_register(struct cx8802_dev *dev)
int mfe_shared = 0; /* bus not shared by default */
int res = -EINVAL;
- if (0 != core->i2c_rc) {
- printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
+ if (core->i2c_rc != 0) {
+ pr_err("no i2c-bus available, cannot attach dvb drivers\n");
goto frontend_detach;
}
@@ -1030,7 +1025,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, &core->i2c_adap,
DVB_PLL_THOMSON_DTT759X))
@@ -1044,7 +1039,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, &core->i2c_adap,
DVB_PLL_THOMSON_DTT7579))
@@ -1058,10 +1053,10 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216ME_MK3))
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
break;
@@ -1069,10 +1064,10 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216MEX_MK3))
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216MEX_MK3))
goto frontend_detach;
}
break;
@@ -1082,8 +1077,8 @@ static int dvb_register(struct cx8802_dev *dev)
dev->frontends.gate = 2;
/* DVB-S init */
fe0->dvb.frontend = dvb_attach(cx24123_attach,
- &hauppauge_novas_config,
- &dev->core->i2c_adap);
+ &hauppauge_novas_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1097,8 +1092,8 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
/* DVB-T init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
- &hauppauge_hvr_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr_config,
+ &dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
@@ -1112,7 +1107,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1122,19 +1117,21 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
- /* The tin box says DEE1601, but it seems to be DTT7579
- * compatible, with a slightly different MT352 AGC gain. */
+ /*
+ * The tin box says DEE1601, but it seems to be DTT7579
+ * compatible, with a slightly different MT352 AGC gain.
+ */
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_dual,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1144,7 +1141,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
@@ -1154,7 +1151,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_LG_Z201))
goto frontend_detach;
@@ -1166,7 +1163,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dntv_live_dvbt_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_UNKNOWN_1))
goto frontend_detach;
@@ -1175,27 +1172,27 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
/* MT352 is on a secondary I2C bus made from some GPIO lines */
- fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
+ fe0->dvb.frontend = dvb_attach(mt352_attach,
+ &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
#else
- printk(KERN_ERR "%s/2: built without vp3054 support\n",
- core->name);
+ pr_err("built without vp3054 support\n");
#endif
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_hybrid,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x61,
- TUNER_THOMSON_FE6600))
+ &core->i2c_adap, 0x61,
+ TUNER_THOMSON_FE6600))
goto frontend_detach;
}
break;
@@ -1203,7 +1200,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_xc3028,
&core->i2c_adap);
- if (fe0->dvb.frontend == NULL)
+ if (!fe0->dvb.frontend)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_mt352_xc3028,
&core->i2c_adap);
@@ -1220,7 +1217,7 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_PCHDTV_HD3000:
fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
@@ -1241,7 +1238,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_MICROTUNE_4042FI5))
@@ -1259,7 +1256,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
@@ -1277,13 +1274,13 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x43))
+ &core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
@@ -1298,13 +1295,13 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x43))
+ &core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
@@ -1312,7 +1309,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(nxt200x_attach,
&ati_hdtvwonder,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_TUV1236D))
@@ -1333,8 +1330,8 @@ static int dvb_register(struct cx8802_dev *dev)
override_tone = false;
if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
- &core->i2c_adap, 0x08, ISL6421_DCL, 0x00,
- override_tone))
+ &core->i2c_adap, 0x08, ISL6421_DCL,
+ 0x00, override_tone))
goto frontend_detach;
}
break;
@@ -1360,7 +1357,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&pinnacle_pctv_hd_800i_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&pinnacle_pctv_hd_800i_tuner_config))
@@ -1369,9 +1366,9 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
- &dvico_hdtv5_pci_nano_config,
- &core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ &dvico_hdtv5_pci_nano_config,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
struct dvb_frontend *fe;
struct xc2028_config cfg = {
.i2c_adap = &core->i2c_adap,
@@ -1385,7 +1382,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe = dvb_attach(xc2028_attach,
fe0->dvb.frontend, &cfg);
- if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+ if (fe && fe->ops.tuner_ops.set_config)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
@@ -1427,7 +1424,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
break;
- case CX88_BOARD_KWORLD_ATSC_120:
+ case CX88_BOARD_KWORLD_ATSC_120:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&kworld_atsc_120_config,
&core->i2c_adap);
@@ -1438,7 +1435,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&dvico_fusionhdtv7_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&dvico_fusionhdtv7_tuner_config))
@@ -1451,8 +1448,8 @@ static int dvb_register(struct cx8802_dev *dev)
dev->frontends.gate = 2;
/* DVB-S/S2 Init */
fe0->dvb.frontend = dvb_attach(cx24116_attach,
- &hauppauge_hvr4000_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr4000_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1466,8 +1463,8 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
/* DVB-T Init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
- &hauppauge_hvr_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr_config,
+ &dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
@@ -1479,8 +1476,8 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
fe0->dvb.frontend = dvb_attach(cx24116_attach,
- &hauppauge_hvr4000_config,
- &dev->core->i2c_adap);
+ &hauppauge_hvr4000_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
@@ -1495,7 +1492,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&tevii_tuner_sharp_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
&core->i2c_adap, DVB_PLL_OPERA1))
goto frontend_detach;
@@ -1506,8 +1503,9 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(stv0288_attach,
&tevii_tuner_earda_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
+ if (fe0->dvb.frontend) {
+ if (!dvb_attach(stb6000_attach,
+ fe0->dvb.frontend, 0x61,
&core->i2c_adap))
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
@@ -1519,16 +1517,16 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&tevii_s460_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TEVII_S464:
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
+ if (fe0->dvb.frontend) {
dvb_attach(ts2020_attach, fe0->dvb.frontend,
- &tevii_ts2020_config, &core->i2c_adap);
+ &tevii_ts2020_config, &core->i2c_adap);
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
}
@@ -1540,7 +1538,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&hauppauge_hvr4000_config,
&core->i2c_adap);
- if (fe0->dvb.frontend != NULL)
+ if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII:
@@ -1557,9 +1555,9 @@ static int dvb_register(struct cx8802_dev *dev)
struct dvb_tuner_ops *tuner_ops = NULL;
fe0->dvb.frontend = dvb_attach(stv0900_attach,
- &prof_7301_stv0900_config,
- &core->i2c_adap, 0);
- if (fe0->dvb.frontend != NULL) {
+ &prof_7301_stv0900_config,
+ &core->i2c_adap, 0);
+ if (fe0->dvb.frontend) {
if (!dvb_attach(stb6100_attach, fe0->dvb.frontend,
&prof_7301_stb6100_config,
&core->i2c_adap))
@@ -1589,8 +1587,8 @@ static int dvb_register(struct cx8802_dev *dev)
mdelay(200);
fe0->dvb.frontend = dvb_attach(stv0299_attach,
- &samsung_stv0299_config,
- &dev->core->i2c_adap);
+ &samsung_stv0299_config,
+ &dev->core->i2c_adap);
if (fe0->dvb.frontend) {
fe0->dvb.frontend->ops.tuner_ops.set_params =
samsung_smt_7020_tuner_set_params;
@@ -1606,8 +1604,8 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_TWINHAN_VP1027_DVBS:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(mb86a16_attach,
- &twinhan_vp1027,
- &core->i2c_adap);
+ &twinhan_vp1027,
+ &core->i2c_adap);
if (fe0->dvb.frontend) {
core->prev_set_voltage =
fe0->dvb.frontend->ops.set_voltage;
@@ -1617,15 +1615,12 @@ static int dvb_register(struct cx8802_dev *dev)
break;
default:
- printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
- core->name);
+ pr_err("The frontend of your DVB/ATSC card isn't supported yet\n");
break;
}
- if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) {
- printk(KERN_ERR
- "%s/2: frontend initialization failed\n",
- core->name);
+	if (!fe0->dvb.frontend || (fe1 && !fe1->dvb.frontend)) {
+ pr_err("frontend initialization failed\n");
goto frontend_detach;
}
/* define general-purpose callback pointer */
@@ -1660,7 +1655,8 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
- dprintk( 1, "%s\n", __func__);
+
+ dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1724,7 +1720,8 @@ static int cx8802_dvb_advise_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
- dprintk( 1, "%s\n", __func__);
+
+ dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1747,8 +1744,8 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
struct vb2_dvb_frontend *fe;
int i;
- dprintk( 1, "%s\n", __func__);
- dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+ dprintk(1, "%s\n", __func__);
+ dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
@@ -1760,25 +1757,25 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
/* If vp3054 isn't enabled, a stub will just return 0 */
err = vp3054_i2c_probe(dev);
- if (0 != err)
+ if (err != 0)
goto fail_core;
/* dvb stuff */
- printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
+ pr_info("cx2388x based DVB/ATSC card\n");
dev->ts_gen_cntrl = 0x0c;
err = cx8802_alloc_frontends(dev);
if (err)
goto fail_core;
- err = -ENODEV;
for (i = 1; i <= core->board.num_frontends; i++) {
struct vb2_queue *q;
fe = vb2_dvb_get_frontend(&core->dvbdev->frontends, i);
- if (fe == NULL) {
- printk(KERN_ERR "%s() failed to get frontend(%d)\n",
- __func__, i);
+ if (!fe) {
+ pr_err("%s() failed to get frontend(%d)\n",
+ __func__, i);
+ err = -ENODEV;
goto fail_probe;
}
q = &fe->dvb.dvbq;
@@ -1805,8 +1802,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
err = dvb_register(dev);
if (err)
/* frontends/adapter de-allocated in dvb_register */
- printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
- core->name, err);
+ pr_err("dvb_register failed (err = %d)\n", err);
return err;
fail_probe:
vb2_dvb_dealloc_frontends(&core->dvbdev->frontends);
@@ -1819,7 +1815,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
- dprintk( 1, "%s\n", __func__);
+ dprintk(1, "%s\n", __func__);
vb2_dvb_unregister_bus(&dev->frontends);
@@ -1841,8 +1837,7 @@ static struct cx8802_driver cx8802_dvb_driver = {
static int __init dvb_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
- CX88_VERSION);
+ pr_info("cx2388x dvb driver version %s loaded\n", CX88_VERSION);
return cx8802_register_driver(&cx8802_dvb_driver);
}
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index cf2d69615838..f7692775fb5a 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -1,55 +1,53 @@
/*
+ *
+ * cx88-i2c.c -- all the i2c code is here
+ *
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * & Marcus Metzler (mocm@thp.uni-koeln.de)
+ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * - Multituner support and i2c address binding
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88-i2c.c -- all the i2c code is here
-
- Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- & Marcus Metzler (mocm@thp.uni-koeln.de)
- (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
- (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
-
- (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
- - Multituner support and i2c address binding
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+#include "cx88.h"
-#include <linux/module.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
-#include <asm/io.h>
-
-#include "cx88.h"
#include <media/v4l2-common.h>
static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug,"enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
static unsigned int i2c_scan;
module_param(i2c_scan, int, 0444);
-MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
+MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
static unsigned int i2c_udelay = 5;
module_param(i2c_udelay, int, 0644);
-MODULE_PARM_DESC(i2c_udelay,"i2c delay at insmod time, in usecs "
- "(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay,
+ "i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
-#define dprintk(level,fmt, arg...) if (i2c_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (i2c_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ----------------------------------------------------------------------- */
@@ -109,26 +107,26 @@ static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
static const char * const i2c_devs[128] = {
- [ 0x1c >> 1 ] = "lgdt330x",
- [ 0x86 >> 1 ] = "tda9887/cx22702",
- [ 0xa0 >> 1 ] = "eeprom",
- [ 0xc0 >> 1 ] = "tuner (analog)",
- [ 0xc2 >> 1 ] = "tuner (analog/dvb)",
- [ 0xc8 >> 1 ] = "xc5000",
+ [0x1c >> 1] = "lgdt330x",
+ [0x86 >> 1] = "tda9887/cx22702",
+ [0xa0 >> 1] = "eeprom",
+ [0xc0 >> 1] = "tuner (analog)",
+ [0xc2 >> 1] = "tuner (analog/dvb)",
+ [0xc8 >> 1] = "xc5000",
};
static void do_i2c_scan(const char *name, struct i2c_client *c)
{
unsigned char buf;
- int i,rc;
+ int i, rc;
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
c->addr = i;
- rc = i2c_master_recv(c,&buf,0);
+ rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk("%s: i2c scan: found device @ 0x%x [%s]\n",
- name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
+ pr_info("i2c scan: found device @ 0x%x [%s]\n",
+ i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
@@ -136,14 +134,13 @@ static void do_i2c_scan(const char *name, struct i2c_client *c)
int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
{
/* Prevents usage of invalid delay values */
- if (i2c_udelay<5)
- i2c_udelay=5;
+ if (i2c_udelay < 5)
+ i2c_udelay = 5;
core->i2c_algo = cx8800_i2c_algo_template;
-
core->i2c_adap.dev.parent = &pci->dev;
- strlcpy(core->i2c_adap.name,core->name,sizeof(core->i2c_adap.name));
+ strlcpy(core->i2c_adap.name, core->name, sizeof(core->i2c_adap.name));
core->i2c_adap.owner = THIS_MODULE;
core->i2c_algo.udelay = i2c_udelay;
core->i2c_algo.data = core;
@@ -152,32 +149,35 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
core->i2c_client.adapter = &core->i2c_adap;
strlcpy(core->i2c_client.name, "cx88xx internal", I2C_NAME_SIZE);
- cx8800_bit_setscl(core,1);
- cx8800_bit_setsda(core,1);
+ cx8800_bit_setscl(core, 1);
+ cx8800_bit_setsda(core, 1);
core->i2c_rc = i2c_bit_add_bus(&core->i2c_adap);
- if (0 == core->i2c_rc) {
- static u8 tuner_data[] =
- { 0x0b, 0xdc, 0x86, 0x52 };
- static struct i2c_msg tuner_msg =
- { .flags = 0, .addr = 0xc2 >> 1, .buf = tuner_data, .len = 4 };
+ if (core->i2c_rc == 0) {
+ static u8 tuner_data[] = {
+ 0x0b, 0xdc, 0x86, 0x52 };
+ static struct i2c_msg tuner_msg = {
+ .flags = 0,
+ .addr = 0xc2 >> 1,
+ .buf = tuner_data,
+ .len = 4
+ };
dprintk(1, "i2c register ok\n");
- switch( core->boardnr ) {
- case CX88_BOARD_HAUPPAUGE_HVR1300:
- case CX88_BOARD_HAUPPAUGE_HVR3000:
- case CX88_BOARD_HAUPPAUGE_HVR4000:
- printk("%s: i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n",
- core->name);
- i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
- break;
- default:
- break;
+ switch (core->boardnr) {
+ case CX88_BOARD_HAUPPAUGE_HVR1300:
+ case CX88_BOARD_HAUPPAUGE_HVR3000:
+ case CX88_BOARD_HAUPPAUGE_HVR4000:
+ pr_info("i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n");
+ i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
+ break;
+ default:
+ break;
}
if (i2c_scan)
- do_i2c_scan(core->name,&core->i2c_client);
+ do_i2c_scan(core->name, &core->i2c_client);
} else
- printk("%s: i2c register FAILED\n", core->name);
+ pr_err("i2c register FAILED\n");
return core->i2c_rc;
}
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index cd7687183381..c7b3cb406499 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -16,19 +16,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "cx88.h"
#include <media/rc-core.h>
#define MODULE_NAME "cx88xx"
@@ -57,7 +54,7 @@ struct cx88_IR {
u32 mask_keyup;
};
-static unsigned ir_samplerate = 4;
+static unsigned int ir_samplerate = 4;
module_param(ir_samplerate, uint, 0444);
MODULE_PARM_DESC(ir_samplerate, "IR samplerate in kHz, 1 - 20, default 4");
@@ -65,11 +62,15 @@ static int ir_debug;
module_param(ir_debug, int, 0644); /* debug level [IR] */
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
-#define ir_dprintk(fmt, arg...) if (ir_debug) \
- printk(KERN_DEBUG "%s IR: " fmt , ir->core->name , ##arg)
+#define ir_dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ printk(KERN_DEBUG "%s IR: " fmt, ir->core->name, ##arg);\
+} while (0)
-#define dprintk(fmt, arg...) if (ir_debug) \
- printk(KERN_DEBUG "cx88 IR: " fmt , ##arg)
+#define dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ printk(KERN_DEBUG "cx88 IR: " fmt, ##arg); \
+} while (0)
/* ---------------------------------------------------------------------- */
@@ -82,21 +83,22 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
gpio = cx_read(ir->gpio_addr);
switch (core->boardnr) {
case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
- /* This board apparently uses a combination of 2 GPIO
- to represent the keys. Additionally, the second GPIO
- can be used for parity.
-
- Example:
-
- for key "5"
- gpio = 0x758, auxgpio = 0xe5 or 0xf5
- for key "Power"
- gpio = 0x758, auxgpio = 0xed or 0xfd
+ /*
+	 * This board apparently uses a combination of 2 GPIOs
+ * to represent the keys. Additionally, the second GPIO
+ * can be used for parity.
+ *
+ * Example:
+ *
+ * for key "5"
+ * gpio = 0x758, auxgpio = 0xe5 or 0xf5
+ * for key "Power"
+ * gpio = 0x758, auxgpio = 0xed or 0xfd
*/
auxgpio = cx_read(MO_GP1_IO);
/* Take out the parity part */
- gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
+ gpio = (gpio & 0x7fd) + (auxgpio & 0xef);
break;
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_DTV1800H:
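The masking arithmetic in the NPGTECH case can be checked with the two sample readings quoted in the comment above: both parity variants of a key collapse to the same code. Treating the stripped auxgpio bit 0x10 as the parity bit is inferred from the mask, and the helper name below is made up:

#include <stdio.h>
#include <stdint.h>

static uint32_t combine(uint32_t gpio, uint32_t auxgpio)
{
	/* drop bit 0x10 of auxgpio, which appears to carry the parity */
	return (gpio & 0x7fd) + (auxgpio & 0xef);
}

int main(void)
{
	printf("key '5'   : 0x%03x / 0x%03x\n",
	       combine(0x758, 0xe5), combine(0x758, 0xf5));
	printf("key Power : 0x%03x / 0x%03x\n",
	       combine(0x758, 0xed), combine(0x758, 0xfd));
	return 0;
}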
@@ -145,7 +147,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
if (0 == (gpio & ir->mask_keyup))
rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
- 0);
+ 0);
else
rc_keyup(ir->dev);
@@ -176,8 +178,7 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);
cx88_ir_handle_key(ir);
- missed = hrtimer_forward_now(&ir->timer,
- ktime_set(0, ir->polling * 1000000));
+ missed = hrtimer_forward_now(&ir->timer, ir->polling * 1000000);
if (missed > 1)
ir_dprintk("Missed ticks %ld\n", missed - 1);
@@ -197,8 +198,7 @@ static int __cx88_ir_start(void *priv)
if (ir->polling) {
hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir->timer.function = cx88_ir_work;
- hrtimer_start(&ir->timer,
- ktime_set(0, ir->polling * 1000000),
+ hrtimer_start(&ir->timer, ir->polling * 1000000,
HRTIMER_MODE_REL);
}
if (ir->sampling) {
@@ -234,12 +234,14 @@ int cx88_ir_start(struct cx88_core *core)
return 0;
}
+EXPORT_SYMBOL(cx88_ir_start);
void cx88_ir_stop(struct cx88_core *core)
{
if (core->ir->users)
__cx88_ir_stop(core);
}
+EXPORT_SYMBOL(cx88_ir_stop);
static int cx88_ir_open(struct rc_dev *rc)
{
@@ -511,7 +513,7 @@ int cx88_ir_fini(struct cx88_core *core)
struct cx88_IR *ir = core->ir;
/* skip detach on non attached boards */
- if (NULL == ir)
+ if (!ir)
return 0;
cx88_ir_stop(core);
@@ -529,7 +531,7 @@ void cx88_ir_irq(struct cx88_core *core)
{
struct cx88_IR *ir = core->ir;
u32 samples;
- unsigned todo, bits;
+ unsigned int todo, bits;
struct ir_raw_event ev;
if (!ir || !ir->sampling)
@@ -579,7 +581,7 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
}
dprintk("IR Key/Flags: (0x%02x/0x%02x)\n",
- code & 0xff, flags & 0xff);
+ code & 0xff, flags & 0xff);
*protocol = RC_TYPE_UNKNOWN;
*scancode = code & 0xff;
@@ -601,7 +603,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
const unsigned short *addr_list = default_addr_list;
const unsigned short *addrp;
/* Instantiate the IR receiver device, if present */
- if (0 != core->i2c_rc)
+ if (core->i2c_rc != 0)
return;
memset(&info, 0, sizeof(struct i2c_board_info));
@@ -639,8 +641,8 @@ void cx88_i2c_init_ir(struct cx88_core *core)
info.platform_data = &core->init_data;
}
if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
- I2C_SMBUS_READ, 0,
- I2C_SMBUS_QUICK, NULL) >= 0) {
+ I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0) {
info.addr = *addrp;
i2c_new_device(&core->i2c_adap, &info);
break;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 245357adbc25..52ff00ebd4bd 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -16,21 +16,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
-#include <asm/delay.h>
-
-#include "cx88.h"
+#include <linux/delay.h>
/* ------------------------------------------------------------------ */
@@ -42,23 +38,20 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [mpeg]");
-#define dprintk(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
-} while(0)
-
-#define mpeg_dbg(level, fmt, arg...) do { \
- if (debug + 1 > level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG pr_fmt("%s: mpeg:" fmt), \
+ __func__, ##arg); \
+} while (0)
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
{
- struct cx8802_dev *dev=container_of(work, struct cx8802_dev, request_module_wk);
+ struct cx8802_dev *dev = container_of(work, struct cx8802_dev,
+ request_module_wk);
if (dev->core->board.mpeg & CX88_MPEG_DVB)
request_module("cx88-dvb");
@@ -81,18 +74,17 @@ static void flush_request_modules(struct cx8802_dev *dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
-
static LIST_HEAD(cx8802_devlist);
static DEFINE_MUTEX(cx8802_mutex);
/* ------------------------------------------------------------------ */
int cx8802_start_dma(struct cx8802_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf)
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf)
{
struct cx88_core *core = dev->core;
- dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
+ dprintk(1, "w: %d, h: %d, f: %d\n",
core->width, core->height, core->field);
/* setup fifo + format */
@@ -102,33 +94,35 @@ int cx8802_start_dma(struct cx8802_dev *dev,
/* write TS length to chip */
cx_write(MO_TS_LNGTH, dev->ts_packet_size);
- /* FIXME: this needs a review.
- * also: move to cx88-blackbird + cx88-dvb source files? */
+ /*
+ * FIXME: this needs a review.
+ * also: move to cx88-blackbird + cx88-dvb source files?
+ */
- dprintk( 1, "core->active_type_id = 0x%08x\n", core->active_type_id);
+ dprintk(1, "core->active_type_id = 0x%08x\n", core->active_type_id);
- if ( (core->active_type_id == CX88_MPEG_DVB) &&
- (core->board.mpeg & CX88_MPEG_DVB) ) {
-
- dprintk( 1, "cx8802_start_dma doing .dvb\n");
+ if ((core->active_type_id == CX88_MPEG_DVB) &&
+ (core->board.mpeg & CX88_MPEG_DVB)) {
+ dprintk(1, "cx8802_start_dma doing .dvb\n");
/* negedge driven & software reset */
cx_write(TS_GEN_CNTRL, 0x0040 | dev->ts_gen_cntrl);
udelay(100);
cx_write(MO_PINMUX_IO, 0x00);
- cx_write(TS_HW_SOP_CNTRL, 0x47<<16|188<<4|0x01);
+ cx_write(TS_HW_SOP_CNTRL, 0x47 << 16 | 188 << 4 | 0x01);
switch (core->boardnr) {
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
case CX88_BOARD_PCHDTV_HD5500:
- cx_write(TS_SOP_STAT, 1<<13);
+ cx_write(TS_SOP_STAT, 1 << 13);
break;
case CX88_BOARD_SAMSUNG_SMT_7020:
cx_write(TS_SOP_STAT, 0x00);
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
- cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */
+ /* Enable MPEG parallel IO and video signal pins */
+ cx_write(MO_PINMUX_IO, 0x88);
udelay(100);
break;
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -152,22 +146,24 @@ int cx8802_start_dma(struct cx8802_dev *dev,
}
cx_write(TS_GEN_CNTRL, dev->ts_gen_cntrl);
udelay(100);
- } else if ( (core->active_type_id == CX88_MPEG_BLACKBIRD) &&
- (core->board.mpeg & CX88_MPEG_BLACKBIRD) ) {
- dprintk( 1, "cx8802_start_dma doing .blackbird\n");
+ } else if ((core->active_type_id == CX88_MPEG_BLACKBIRD) &&
+ (core->board.mpeg & CX88_MPEG_BLACKBIRD)) {
+ dprintk(1, "cx8802_start_dma doing .blackbird\n");
cx_write(MO_PINMUX_IO, 0x88); /* enable MPEG parallel IO */
- cx_write(TS_GEN_CNTRL, 0x46); /* punctured clock TS & posedge driven & software reset */
+ /* punctured clock TS & posedge driven & software reset */
+ cx_write(TS_GEN_CNTRL, 0x46);
udelay(100);
cx_write(TS_HW_SOP_CNTRL, 0x408); /* mpeg start byte */
cx_write(TS_VALERR_CNTRL, 0x2000);
- cx_write(TS_GEN_CNTRL, 0x06); /* punctured clock TS & posedge driven */
+ /* punctured clock TS & posedge driven */
+ cx_write(TS_GEN_CNTRL, 0x06);
udelay(100);
} else {
- printk( "%s() Failed. Unsupported value in .mpeg (0x%08x)\n", __func__,
- core->board.mpeg );
+ pr_err("%s() Failed. Unsupported value in .mpeg (0x%08x)\n",
+ __func__, core->board.mpeg);
return -EINVAL;
}
@@ -176,20 +172,22 @@ int cx8802_start_dma(struct cx8802_dev *dev,
q->count = 0;
/* enable irqs */
- dprintk( 1, "setting the interrupt mask\n" );
+ dprintk(1, "setting the interrupt mask\n");
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
cx_set(MO_TS_INTMSK, 0x1f0011);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_TS_DMACNTRL, 0x11);
return 0;
}
+EXPORT_SYMBOL(cx8802_start_dma);
static int cx8802_stop_dma(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
- dprintk( 1, "cx8802_stop_dma\n" );
+
+ dprintk(1, "\n");
/* stop dma */
cx_clear(MO_TS_DMACNTRL, 0x11);
@@ -208,12 +206,12 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
{
struct cx88_buffer *buf;
- dprintk( 1, "cx8802_restart_queue\n" );
+ dprintk(1, "\n");
if (list_empty(&q->active))
return 0;
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
cx8802_start_dma(dev, q, buf);
return 0;
@@ -222,7 +220,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
/* ------------------------------------------------------------------ */
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
- struct cx88_buffer *buf)
+ struct cx88_buffer *buf)
{
int size = dev->ts_packet_size * dev->ts_packet_count;
struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
@@ -234,43 +232,46 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
- dev->ts_packet_size, dev->ts_packet_count, 0);
+ dev->ts_packet_size, dev->ts_packet_count, 0);
if (rc) {
if (risc->cpu)
- pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ pci_free_consistent(dev->pci, risc->size,
+ risc->cpu, risc->dma);
memset(risc, 0, sizeof(*risc));
return rc;
}
return 0;
}
+EXPORT_SYMBOL(cx8802_buf_prepare);
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
{
struct cx88_buffer *prev;
struct cx88_dmaqueue *cx88q = &dev->mpegq;
- dprintk( 1, "cx8802_buf_queue\n" );
+ dprintk(1, "\n");
/* add jump to start */
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
if (list_empty(&cx88q->active)) {
- dprintk( 1, "queue is empty - first active\n" );
+ dprintk(1, "queue is empty - first active\n");
list_add_tail(&buf->list, &cx88q->active);
- dprintk(1,"[%p/%d] %s - first active\n",
+ dprintk(1, "[%p/%d] %s - first active\n",
buf, buf->vb.vb2_buf.index, __func__);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
- dprintk( 1, "queue is not empty - append to active\n" );
+ dprintk(1, "queue is not empty - append to active\n");
prev = list_entry(cx88q->active.prev, struct cx88_buffer, list);
list_add_tail(&buf->list, &cx88q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk( 1, "[%p/%d] %s - append to active\n",
+ dprintk(1, "[%p/%d] %s - append to active\n",
buf, buf->vb.vb2_buf.index, __func__);
}
}
+EXPORT_SYMBOL(cx8802_buf_queue);
/* ----------------------------------------------------------- */
@@ -280,23 +281,24 @@ static void do_cancel_buffers(struct cx8802_dev *dev)
struct cx88_buffer *buf;
unsigned long flags;
- spin_lock_irqsave(&dev->slock,flags);
+ spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
- spin_unlock_irqrestore(&dev->slock,flags);
+ spin_unlock_irqrestore(&dev->slock, flags);
}
void cx8802_cancel_buffers(struct cx8802_dev *dev)
{
- dprintk( 1, "cx8802_cancel_buffers" );
+ dprintk(1, "\n");
cx8802_stop_dma(dev);
do_cancel_buffers(dev);
}
+EXPORT_SYMBOL(cx8802_cancel_buffers);
-static const char * cx88_mpeg_irqs[32] = {
+static const char *cx88_mpeg_irqs[32] = {
"ts_risci1", NULL, NULL, NULL,
"ts_risci2", NULL, NULL, NULL,
"ts_oflow", NULL, NULL, NULL,
@@ -310,7 +312,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
struct cx88_core *core = dev->core;
u32 status, mask, count;
- dprintk( 1, "cx8802_mpeg_irq\n" );
+ dprintk(1, "\n");
status = cx_read(MO_TS_INTSTAT);
mask = cx_read(MO_TS_INTMSK);
if (0 == (status & mask))
@@ -319,20 +321,21 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
cx_write(MO_TS_INTSTAT, status);
if (debug || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq mpeg ",
+ cx88_print_irqbits("irq mpeg ",
cx88_mpeg_irqs, ARRAY_SIZE(cx88_mpeg_irqs),
status, mask);
/* risc op code error */
if (status & (1 << 16)) {
- printk(KERN_WARNING "%s: mpeg risc op code error\n",core->name);
+ pr_warn("mpeg risc op code error\n");
cx_clear(MO_TS_DMACNTRL, 0x11);
- cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
+ cx88_sram_channel_dump(dev->core,
+ &cx88_sram_channels[SRAM_CH28]);
}
/* risc1 y */
if (status & 0x01) {
- dprintk( 1, "wake up\n" );
+ dprintk(1, "wake up\n");
spin_lock(&dev->slock);
count = cx_read(MO_TS_GPCNT);
cx88_wakeup(dev->core, &dev->mpegq, count);
@@ -341,7 +344,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
/* other general errors */
if (status & 0x1f0100) {
- dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 );
+ dprintk(0, "general errors: 0x%08x\n", status & 0x1f0100);
spin_lock(&dev->slock);
cx8802_stop_dma(dev);
spin_unlock(&dev->slock);
@@ -360,24 +363,23 @@ static irqreturn_t cx8802_irq(int irq, void *dev_id)
for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_TSINT);
- if (0 == status)
+ if (status == 0)
goto out;
- dprintk( 1, "cx8802_irq\n" );
- dprintk( 1, " loop: %d/%d\n", loop, MAX_IRQ_LOOP );
- dprintk( 1, " status: %d\n", status );
+ dprintk(1, "cx8802_irq\n");
+ dprintk(1, " loop: %d/%d\n", loop, MAX_IRQ_LOOP);
+ dprintk(1, " status: %d\n", status);
handled = 1;
cx_write(MO_PCI_INTSTAT, status);
if (status & core->pci_irqmask)
- cx88_core_irq(core,status);
+ cx88_core_irq(core, status);
if (status & PCI_INT_TSINT)
cx8802_mpeg_irq(dev);
}
- if (MAX_IRQ_LOOP == loop) {
- dprintk( 0, "clearing mask\n" );
- printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
- core->name);
- cx_write(MO_PCI_INTMSK,0);
+ if (loop == MAX_IRQ_LOOP) {
+ dprintk(0, "clearing mask\n");
+ pr_warn("irq loop -- clearing mask\n");
+ cx_write(MO_PCI_INTMSK, 0);
}
out:
@@ -393,18 +395,18 @@ static int cx8802_init_common(struct cx8802_dev *dev)
if (pci_enable_device(dev->pci))
return -EIO;
pci_set_master(dev->pci);
- err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(dev->pci, DMA_BIT_MASK(32));
if (err) {
- printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
+ pr_err("Oops: no 32bit PCI DMA ???\n");
return -EIO;
}
dev->pci_rev = dev->pci->revision;
pci_read_config_byte(dev->pci, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/2: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->core->name,
- pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(dev->pci,0));
+ pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(dev->pci, 0));
/* initialize driver struct */
spin_lock_init(&dev->slock);
@@ -416,20 +418,19 @@ static int cx8802_init_common(struct cx8802_dev *dev)
err = request_irq(dev->pci->irq, cx8802_irq,
IRQF_SHARED, dev->core->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s: can't get IRQ %d\n",
- dev->core->name, dev->pci->irq);
+ pr_err("can't get IRQ %d\n", dev->pci->irq);
return err;
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
/* everything worked */
- pci_set_drvdata(dev->pci,dev);
+ pci_set_drvdata(dev->pci, dev);
return 0;
}
static void cx8802_fini_common(struct cx8802_dev *dev)
{
- dprintk( 2, "cx8802_fini_common\n" );
+ dprintk(2, "\n");
cx8802_stop_dma(dev);
pci_disable_device(dev->pci);
@@ -442,14 +443,13 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
{
struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
- struct cx88_core *core = dev->core;
unsigned long flags;
/* stop mpeg dma */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->mpegq.active)) {
- dprintk( 2, "suspend\n" );
- printk("%s: suspend mpeg\n", core->name);
+ dprintk(2, "suspend\n");
+ pr_info("suspend mpeg\n");
cx8802_stop_dma(dev);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -458,7 +458,8 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
cx88_shutdown(dev->core);
pci_save_state(pci_dev);
- if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+ if (pci_set_power_state(pci_dev,
+ pci_choose_state(pci_dev, state)) != 0) {
pci_disable_device(pci_dev);
dev->state.disabled = 1;
}
@@ -468,23 +469,20 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
static int cx8802_resume_common(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
- struct cx88_core *core = dev->core;
unsigned long flags;
int err;
if (dev->state.disabled) {
- err=pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR "%s: can't enable device\n",
- dev->core->name);
+ pr_err("can't enable device\n");
return err;
}
dev->state.disabled = 0;
}
- err=pci_set_power_state(pci_dev, PCI_D0);
+ err = pci_set_power_state(pci_dev, PCI_D0);
if (err) {
- printk(KERN_ERR "%s: can't enable device\n",
- dev->core->name);
+ pr_err("can't enable device\n");
pci_disable_device(pci_dev);
dev->state.disabled = 1;
@@ -498,15 +496,16 @@ static int cx8802_resume_common(struct pci_dev *pci_dev)
/* restart video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->mpegq.active)) {
- printk("%s: resume mpeg\n", core->name);
- cx8802_restart_queue(dev,&dev->mpegq);
+ pr_info("resume mpeg\n");
+ cx8802_restart_queue(dev, &dev->mpegq);
}
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+ enum cx88_board_type btype)
{
struct cx8802_driver *d;
@@ -516,6 +515,7 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
return NULL;
}
+EXPORT_SYMBOL(cx8802_get_driver);
/* Driver asked for hardware access. */
static int cx8802_request_acquire(struct cx8802_driver *drv)
@@ -533,7 +533,8 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
core->last_analog_input = core->input;
core->input = 0;
for (i = 0;
- i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i < (sizeof(core->board.input) /
+ sizeof(struct cx88_input));
i++) {
if (core->board.input[i].type == CX88_VMUX_DVB) {
core->input = i;
@@ -542,15 +543,14 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
}
}
- if (drv->advise_acquire)
- {
+ if (drv->advise_acquire) {
core->active_ref++;
if (core->active_type_id == CX88_BOARD_NONE) {
core->active_type_id = drv->type_id;
drv->advise_acquire(drv);
}
- mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ dprintk(1, "Post acquire GPIO=%x\n", cx_read(MO_GP0_IO));
}
return 0;
@@ -561,17 +561,18 @@ static int cx8802_request_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
- if (drv->advise_release && --core->active_ref == 0)
- {
+ if (drv->advise_release && --core->active_ref == 0) {
if (drv->type_id == CX88_MPEG_DVB) {
- /* If the DVB driver is releasing, reset the input
- state to the last configured analog input */
+ /*
+ * If the DVB driver is releasing, reset the input
+ * state to the last configured analog input
+ */
core->input = core->last_analog_input;
}
drv->advise_release(drv);
core->active_type_id = CX88_BOARD_NONE;
- mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ dprintk(1, "Post release GPIO=%x\n", cx_read(MO_GP0_IO));
}
return 0;
@@ -579,21 +580,21 @@ static int cx8802_request_release(struct cx8802_driver *drv)
static int cx8802_check_driver(struct cx8802_driver *drv)
{
- if (drv == NULL)
+ if (!drv)
return -ENODEV;
if ((drv->type_id != CX88_MPEG_DVB) &&
- (drv->type_id != CX88_MPEG_BLACKBIRD))
+ (drv->type_id != CX88_MPEG_BLACKBIRD))
return -EINVAL;
if ((drv->hw_access != CX8802_DRVCTL_SHARED) &&
- (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
+ (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
return -EINVAL;
- if ((drv->probe == NULL) ||
- (drv->remove == NULL) ||
- (drv->advise_acquire == NULL) ||
- (drv->advise_release == NULL))
+ if ((!drv->probe) ||
+ (!drv->remove) ||
+ (!drv->advise_acquire) ||
+ (!drv->advise_release))
return -EINVAL;
return 0;
@@ -605,28 +606,28 @@ int cx8802_register_driver(struct cx8802_driver *drv)
struct cx8802_driver *driver;
int err, i = 0;
- printk(KERN_INFO
- "cx88/2: registering cx8802 driver, type: %s access: %s\n",
- drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
- drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+ pr_info("registering cx8802 driver, type: %s access: %s\n",
+ drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+ drv->hw_access == CX8802_DRVCTL_SHARED ?
+ "shared" : "exclusive");
- if ((err = cx8802_check_driver(drv)) != 0) {
- printk(KERN_ERR "cx88/2: cx8802_driver is invalid\n");
+ err = cx8802_check_driver(drv);
+ if (err) {
+ pr_err("cx8802_driver is invalid\n");
return err;
}
mutex_lock(&cx8802_mutex);
list_for_each_entry(dev, &cx8802_devlist, devlist) {
- printk(KERN_INFO
- "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- dev->core->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, dev->core->board.name,
- dev->core->boardnr);
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+ dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
/* Bring up a new struct for each driver instance */
- driver = kzalloc(sizeof(*drv),GFP_KERNEL);
- if (driver == NULL) {
+ driver = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!driver) {
err = -ENOMEM;
goto out;
}
@@ -645,9 +646,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
i++;
list_add_tail(&driver->drvlist, &dev->drvlist);
} else {
- printk(KERN_ERR
- "%s/2: cx8802 probe failed, err = %d\n",
- dev->core->name, err);
+ pr_err("cx8802 probe failed, err = %d\n", err);
}
mutex_unlock(&drv->core->lock);
}
@@ -657,6 +656,7 @@ out:
mutex_unlock(&cx8802_mutex);
return err;
}
+EXPORT_SYMBOL(cx8802_register_driver);
int cx8802_unregister_driver(struct cx8802_driver *drv)
{
@@ -664,19 +664,18 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
struct cx8802_driver *d, *dtmp;
int err = 0;
- printk(KERN_INFO
- "cx88/2: unregistering cx8802 driver, type: %s access: %s\n",
- drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
- drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+ pr_info("unregistering cx8802 driver, type: %s access: %s\n",
+ drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+ drv->hw_access == CX8802_DRVCTL_SHARED ?
+ "shared" : "exclusive");
mutex_lock(&cx8802_mutex);
list_for_each_entry(dev, &cx8802_devlist, devlist) {
- printk(KERN_INFO
- "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- dev->core->name, dev->pci->subsystem_vendor,
- dev->pci->subsystem_device, dev->core->board.name,
- dev->core->boardnr);
+ pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+ dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
mutex_lock(&dev->core->lock);
@@ -690,8 +689,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
list_del(&d->drvlist);
kfree(d);
} else
- printk(KERN_ERR "%s/2: cx8802 driver remove "
- "failed (%d)\n", dev->core->name, err);
+ pr_err("cx8802 driver remove failed (%d)\n",
+ err);
}
mutex_unlock(&dev->core->lock);
@@ -701,6 +700,7 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
return err;
}
+EXPORT_SYMBOL(cx8802_unregister_driver);
/* ----------------------------------------------------------- */
static int cx8802_probe(struct pci_dev *pci_dev,
@@ -712,18 +712,18 @@ static int cx8802_probe(struct pci_dev *pci_dev,
/* general setup */
core = cx88_core_get(pci_dev);
- if (NULL == core)
+ if (!core)
return -EINVAL;
- printk("%s/2: cx2388x 8802 Driver Manager\n", core->name);
+ pr_info("cx2388x 8802 Driver Manager\n");
err = -ENODEV;
if (!core->board.mpeg)
goto fail_core;
err = -ENOMEM;
- dev = kzalloc(sizeof(*dev),GFP_KERNEL);
- if (NULL == dev)
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
goto fail_core;
dev->pci = pci_dev;
dev->core = core;
@@ -737,7 +737,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&dev->drvlist);
mutex_lock(&cx8802_mutex);
- list_add_tail(&dev->devlist,&cx8802_devlist);
+ list_add_tail(&dev->devlist, &cx8802_devlist);
mutex_unlock(&cx8802_mutex);
/* now autoload cx88-dvb or cx88-blackbird */
@@ -748,7 +748,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
kfree(dev);
fail_core:
core->dvbdev = NULL;
- cx88_core_put(core,pci_dev);
+ cx88_core_put(core, pci_dev);
return err;
}
@@ -758,7 +758,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
dev = pci_get_drvdata(pci_dev);
- dprintk( 1, "%s\n", __func__);
+ dprintk(1, "%s\n", __func__);
flush_request_modules(dev);
@@ -768,17 +768,15 @@ static void cx8802_remove(struct pci_dev *pci_dev)
struct cx8802_driver *drv, *tmp;
int err;
- printk(KERN_WARNING "%s/2: Trying to remove cx8802 driver "
- "while cx8802 sub-drivers still loaded?!\n",
- dev->core->name);
+ pr_warn("Trying to remove cx8802 driver while cx8802 sub-drivers still loaded?!\n");
list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
err = drv->remove(drv);
if (err == 0) {
list_del(&drv->drvlist);
} else
- printk(KERN_ERR "%s/2: cx8802 driver remove "
- "failed (%d)\n", dev->core->name, err);
+ pr_err("cx8802 driver remove failed (%d)\n",
+ err);
kfree(drv);
}
}
@@ -790,7 +788,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
/* common */
cx8802_fini_common(dev);
- cx88_core_put(dev->core,dev->pci);
+ cx88_core_put(dev->core, dev->pci);
kfree(dev);
}
@@ -800,7 +798,7 @@ static const struct pci_device_id cx8802_pci_tbl[] = {
.device = 0x8802,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- },{
+ }, {
/* --- end of list --- */
}
};
@@ -814,12 +812,3 @@ static struct pci_driver cx8802_pci_driver = {
};
module_pci_driver(cx8802_pci_driver);
-
-EXPORT_SYMBOL(cx8802_buf_prepare);
-EXPORT_SYMBOL(cx8802_buf_queue);
-EXPORT_SYMBOL(cx8802_cancel_buffers);
-EXPORT_SYMBOL(cx8802_start_dma);
-
-EXPORT_SYMBOL(cx8802_register_driver);
-EXPORT_SYMBOL(cx8802_unregister_driver);
-EXPORT_SYMBOL(cx8802_get_driver);
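The hunks above convert the driver's printk(KERN_ERR/KERN_WARNING/KERN_INFO "%s: ...", core->name, ...) calls to pr_err()/pr_warn()/pr_info(), which pick up their message prefix from a per-file pr_fmt() definition instead of a hand-rolled "%s:" argument. A minimal sketch of that mechanism, with a hypothetical prefix (the prefix cx88 actually uses is set up via cx88.h and is not visible in this diff):

/* must be defined before the first include that pulls in <linux/printk.h> */
#define pr_fmt(fmt) "cx88-example: " fmt	/* hypothetical prefix */

#include <linux/printk.h>

static void report_dma_error(int err)
{
	/* expands to printk(KERN_ERR "cx88-example: dma setup failed: %d\n", err) */
	pr_err("dma setup failed: %d\n", err);
}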
diff --git a/drivers/media/pci/cx88/cx88-reg.h b/drivers/media/pci/cx88/cx88-reg.h
index 2ec52d1cdea0..f1e1dd634a72 100644
--- a/drivers/media/pci/cx88/cx88-reg.h
+++ b/drivers/media/pci/cx88/cx88-reg.h
@@ -1,32 +1,28 @@
/*
-
- cx88x-hw.h - CX2388x register offsets
-
- Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
- 2001 Michael Eskin
- 2002 Yurij Sysoev <yurij@naturesoft.net>
- 2003 Gerd Knorr <kraxel@bytesex.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * cx88x-hw.h - CX2388x register offsets
+ *
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * 2001 Michael Eskin
+ * 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * 2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _CX88_REG_H_
#define _CX88_REG_H_
-/* ---------------------------------------------------------------------- */
-/* PCI IDs and config space */
+/*
+ * PCI IDs and config space
+ */
#ifndef PCI_VENDOR_ID_CONEXANT
# define PCI_VENDOR_ID_CONEXANT 0x14F1
@@ -39,8 +35,9 @@
#define CX88X_EN_TBFX 0x02
#define CX88X_EN_VSFX 0x04
-/* ---------------------------------------------------------------------- */
-/* PCI controller registers */
+/*
+ * PCI controller registers
+ */
/* Command and Status Register */
#define F0_CMD_STAT_MM 0x2f0004
@@ -63,8 +60,9 @@
#define F3_BAR0_MM 0x2f0310
#define F4_BAR0_MM 0x2f0410
-/* ---------------------------------------------------------------------- */
-/* DMA Controller registers */
+/*
+ * DMA Controller registers
+ */
#define MO_PDMA_STHRSH 0x200000 // Source threshold
#define MO_PDMA_STADRS 0x200004 // Source target address
@@ -157,9 +155,9 @@
#define MO_DMA31_CNT2 0x300168 // {11}RW* DMA Table Size : Ch#31
#define MO_DMA32_CNT2 0x30016C // {11}RW* DMA Table Size : Ch#32
-
-/* ---------------------------------------------------------------------- */
-/* Video registers */
+/*
+ * Video registers
+ */
#define MO_VIDY_DMA 0x310000 // {64}RWp Video Y
#define MO_VIDU_DMA 0x310008 // {64}RWp Video U
@@ -217,9 +215,9 @@
#define MO_VID_DMACNTRL 0x31C040 // {8}RW Video DMA control
#define MO_VID_XFR_STAT 0x31C044 // {1}RO Video transfer status
-
-/* ---------------------------------------------------------------------- */
-/* audio registers */
+/*
+ * audio registers
+ */
#define MO_AUDD_DMA 0x320000 // {64}RWp Audio downstream
#define MO_AUDU_DMA 0x320008 // {64}RWp Audio upstream
@@ -437,9 +435,9 @@
#define AUD_PHACC_FREQ_8LSB 0x320d2b
#define AUD_QAM_MODE 0x320d04
-
-/* ---------------------------------------------------------------------- */
-/* transport stream registers */
+/*
+ * transport stream registers
+ */
#define MO_TS_DMA 0x330000 // {64}RWp Transport stream downstream
#define MO_TS_GPCNT 0x33C020 // {16}RO TS general purpose counter
@@ -455,9 +453,9 @@
#define TS_FIFO_OVFL_STAT 0x33C05C
#define TS_VALERR_CNTRL 0x33C060
-
-/* ---------------------------------------------------------------------- */
-/* VIP registers */
+/*
+ * VIP registers
+ */
#define MO_VIPD_DMA 0x340000 // {64}RWp VIP downstream
#define MO_VIPU_DMA 0x340008 // {64}RWp VIP upstream
@@ -475,9 +473,9 @@
#define MO_VIP_INTCNTRL 0x34C05C // VIP Interrupt Control
#define MO_VIP_XFTERM 0x340060 // VIP transfer terminate
-
-/* ---------------------------------------------------------------------- */
-/* misc registers */
+/*
+ * misc registers
+ */
#define MO_M2M_DMA 0x350000 // {64}RWp Mem2Mem DMA Bfr
#define MO_GP0_IO 0x350010 // {32}RW* GPIOoutput enablesdata I/O
@@ -509,9 +507,9 @@
#define MO_INT1_STAT 0x35C064 // DMA RISC interrupt status
#define MO_INT1_MSTAT 0x35C068 // DMA RISC interrupt masked status
-
-/* ---------------------------------------------------------------------- */
-/* i2c bus registers */
+/*
+ * i2c bus registers
+ */
#define MO_I2C 0x368000 // I2C data/control
#define MO_I2C_DIV (0xf<<4)
@@ -521,9 +519,11 @@
#define MO_I2C_SDA (1<<0)
-/* ---------------------------------------------------------------------- */
-/* general purpose host registers */
-/* FIXME: tyops? s/0x35/0x38/ ?? */
+/*
+ * general purpose host registers
+ *
+ * FIXME: typos? s/0x35/0x38/ ??
+ */
#define MO_GPHSTD_DMA 0x350000 // {64}RWp Host downstream
#define MO_GPHSTU_DMA 0x350008 // {64}RWp Host upstream
@@ -545,9 +545,9 @@
#define MO_GPHST_XFR_STAT 0x38C044 // Host transfer status
#define MO_GPHST_SOFT_RST 0x38C06C // Host software reset
-
-/* ---------------------------------------------------------------------- */
-/* RISC instructions */
+/*
+ * RISC instructions
+ */
#define RISC_SYNC 0x80000000
#define RISC_SYNC_ODD 0x80000000
@@ -576,11 +576,11 @@
#define RISC_CNT_INC 0x00010000
#define RISC_CNT_RSVR 0x00020000
#define RISC_CNT_RESET 0x00030000
-#define RISC_JMP_SRP 0x01
+#define RISC_JMP_SRP 0x01
-
-/* ---------------------------------------------------------------------- */
-/* various constants */
+/*
+ * various constants
+ */
// DMA
/* Interrupt mask/status */
@@ -822,15 +822,4 @@
#define DEFAULT_SAT_U_NTSC 0x7F
#define DEFAULT_SAT_V_NTSC 0x5A
-typedef enum
-{
- SOURCE_TUNER = 0,
- SOURCE_COMPOSITE,
- SOURCE_SVIDEO,
- SOURCE_OTHER1,
- SOURCE_OTHER2,
- SOURCE_COMPVIASVIDEO,
- SOURCE_CCIR656
-} VIDEOSOURCETYPE;
-
#endif /* _CX88_REG_H_ */
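The cx88-reg.h hunks above are purely cosmetic: the old dashed banner comments become block comments in the kernel's preferred style, and the stray separator lines around them are dropped. For reference, a generic before/after sketch (illustrative text only):

/* ---------------------------------------------------------------------- */
/* old style: a pair of single-line comments used as a section banner     */

/*
 * preferred style: one block comment, a leading '*' on every line,
 * with the opening and closing markers on lines of their own
 */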
diff --git a/drivers/media/pci/cx88/cx88-tvaudio.c b/drivers/media/pci/cx88/cx88-tvaudio.c
index 6bbce6ad6295..545ad4c4d1c7 100644
--- a/drivers/media/pci/cx88/cx88-tvaudio.c
+++ b/drivers/media/pci/cx88/cx88-tvaudio.c
@@ -1,39 +1,36 @@
/*
+ * cx88x-audio.c - Conexant CX23880/23881 audio downstream driver
+ *
+ * (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
+ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * (c) 2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * -----------------------------------------------------------------------
+ *
+ * Lot of voodoo here. Even the data sheet doesn't help to
+ * understand what is going on here, the documentation for the audio
+ * part of the cx2388x chip is *very* bad.
+ *
+ * Some of this comes from partly done linux driver sources I got from
+ * [undocumented].
+ *
+ * Some comes from the dscaler sources; one of the dscaler driver guys works
+ * for Conexant ...
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88x-audio.c - Conexant CX23880/23881 audio downstream driver driver
-
- (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
- (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
- (c) 2003 Gerd Knorr <kraxel@bytesex.org>
-
- -----------------------------------------------------------------------
-
- Lot of voodoo here. Even the data sheet doesn't help to
- understand what is going on here, the documentation for the audio
- part of the cx2388x chip is *very* bad.
-
- Some of this comes from party done linux driver sources I got from
- [undocumented].
-
- Some comes from the dscaler sources, one of the dscaler driver guy works
- for Conexant ...
-
- -----------------------------------------------------------------------
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+#include "cx88.h"
#include <linux/module.h>
#include <linux/errno.h>
@@ -50,24 +47,24 @@
#include <linux/delay.h>
#include <linux/kthread.h>
-#include "cx88.h"
-
static unsigned int audio_debug;
module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug, "enable debug messages [audio]");
static unsigned int always_analog;
-module_param(always_analog,int,0644);
-MODULE_PARM_DESC(always_analog,"force analog audio out");
+module_param(always_analog, int, 0644);
+MODULE_PARM_DESC(always_analog, "force analog audio out");
static unsigned int radio_deemphasis;
-module_param(radio_deemphasis,int,0644);
-MODULE_PARM_DESC(radio_deemphasis, "Radio deemphasis time constant, "
- "0=None, 1=50us (elsewhere), 2=75us (USA)");
-
-#define dprintk(fmt, arg...) if (audio_debug) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
-
+module_param(radio_deemphasis, int, 0644);
+MODULE_PARM_DESC(radio_deemphasis,
+ "Radio deemphasis time constant, 0=None, 1=50us (elsewhere), 2=75us (USA)");
+
+#define dprintk(fmt, arg...) do { \
+ if (audio_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: tvaudio:" fmt), \
+ __func__, ##arg); \
+} while (0)
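The rewritten dprintk() above wraps its body in do { } while (0) so the macro expands to exactly one statement and consumes exactly one trailing semicolon. A minimal sketch of why that matters, assuming a module-local debug flag:

/*
 * A macro that expands to a bare 'if' breaks when used as the body of an
 * outer if/else: the caller's ';' terminates the inner 'if', so the
 * following 'else' no longer pairs with the intended condition.
 * Wrapping the body in do { } while (0) avoids that:
 */
#define LOG(fmt, ...) do {						\
	if (debug)							\
		printk(KERN_DEBUG "example: " fmt, ##__VA_ARGS__);	\
} while (0)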
/* ----------------------------------------------------------- */
static const char * const aud_ctl_names[64] = {
@@ -145,7 +142,10 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
if (core->board.mpeg & CX88_MPEG_BLACKBIRD) {
cx_write(AUD_I2SINPUTCNTL, 4);
cx_write(AUD_BAUDRATE, 1);
- /* 'pass-thru mode': this enables the i2s output to the mpeg encoder */
+ /*
+ * 'pass-thru mode': this enables the i2s
+ * output to the mpeg encoder
+ */
cx_set(AUD_CTL, EN_I2SOUT_ENABLE);
cx_write(AUD_I2SOUTPUTCNTL, 1);
cx_write(AUD_I2SCNTL, 0);
@@ -349,7 +349,7 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
{ /* end of list */ },
};
- set_audio_start(core,SEL_NICAM);
+ set_audio_start(core, SEL_NICAM);
switch (core->tvaudio) {
case WW_L:
dprintk("%s SECAM-L NICAM (status: devel)\n", __func__);
@@ -638,7 +638,6 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
case WW_M:
dprintk("%s Warning: wrong value\n", __func__);
return;
- break;
}
mode |= EN_FMRADIO_EN_RDS | EN_DMTRX_SUMDIFF;
@@ -695,13 +694,15 @@ static void set_audio_standard_FM(struct cx88_core *core,
{ /* end of list */ },
};
- /* It is enough to leave default values? */
- /* No, it's not! The deemphasis registers are reset to the 75us
+ /*
+ * Is it enough to leave the default values?
+ *
+ * No, it's not! The deemphasis registers are reset to the 75us
* values by default. Analyzing the spectrum of the decoded audio
* reveals that "no deemphasis" is the same as 75 us, while the 50 us
- * setting results in less deemphasis. */
+ * setting results in less deemphasis.
+ */
static const struct rlist fm_no_deemph[] = {
-
{AUD_POLYPH80SCALEFAC, 0x0003},
{ /* end of list */ },
};
@@ -745,7 +746,7 @@ static int cx88_detect_nicam(struct cx88_core *core)
}
/* wait a little bit for next reading status */
- msleep(10);
+ usleep_range(10000, 20000);
}
dprintk("nicam is not detected.\n");
@@ -766,10 +767,12 @@ void cx88_set_tvaudio(struct cx88_core *core)
/* prepare all dsp registers */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
- /* set nicam mode - otherwise
- AUD_NICAM_STATUS2 contains wrong values */
+ /*
+ * set nicam mode - otherwise
+ * AUD_NICAM_STATUS2 contains wrong values
+ */
set_audio_standard_NICAM(core, EN_NICAM_AUTO_STEREO);
- if (0 == cx88_detect_nicam(core)) {
+ if (cx88_detect_nicam(core) == 0) {
/* fall back to fm / am mono */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
core->audiomode_current = V4L2_TUNER_MODE_MONO;
@@ -798,23 +801,25 @@ void cx88_set_tvaudio(struct cx88_core *core)
break;
case WW_NONE:
case WW_I2SPT:
- printk("%s/0: unknown tv audio mode [%d]\n",
- core->name, core->tvaudio);
+ pr_info("unknown tv audio mode [%d]\n", core->tvaudio);
break;
}
- return;
}
+EXPORT_SYMBOL(cx88_set_tvaudio);
void cx88_newstation(struct cx88_core *core)
{
core->audiomode_manual = UNSET;
core->last_change = jiffies;
}
+EXPORT_SYMBOL(cx88_newstation);
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
{
- static const char * const m[] = { "stereo", "dual mono", "mono", "sap" };
- static const char * const p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
+ static const char * const m[] = { "stereo", "dual mono",
+ "mono", "sap" };
+ static const char * const p[] = { "no pilot", "pilot c1",
+ "pilot c2", "?" };
u32 reg, mode, pilot;
reg = cx_read(AUD_STATUS);
@@ -869,15 +874,18 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
}
/* If software stereo detection is not supported... */
- if (UNSET == t->rxsubchans) {
+ if (t->rxsubchans == UNSET) {
t->rxsubchans = V4L2_TUNER_SUB_MONO;
- /* If the hardware itself detected stereo, also return
- stereo as an available subchannel */
- if (V4L2_TUNER_MODE_STEREO == t->audmode)
+ /*
+ * If the hardware itself detected stereo, also return
+ * stereo as an available subchannel
+ */
+ if (t->audmode == V4L2_TUNER_MODE_STEREO)
t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
}
- return;
}
+EXPORT_SYMBOL(cx88_get_stereo);
+
void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
{
@@ -887,7 +895,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
if (manual) {
core->audiomode_manual = mode;
} else {
- if (UNSET != core->audiomode_manual)
+ if (core->audiomode_manual != UNSET)
return;
}
core->audiomode_current = mode;
@@ -915,7 +923,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
case WW_M:
case WW_I:
case WW_L:
- if (1 == core->use_nicam) {
+ if (core->use_nicam == 1) {
switch (mode) {
case V4L2_TUNER_MODE_MONO:
case V4L2_TUNER_MODE_LANG1:
@@ -933,7 +941,8 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
break;
}
} else {
- if ((core->tvaudio == WW_I) || (core->tvaudio == WW_L)) {
+ if ((core->tvaudio == WW_I) ||
+ (core->tvaudio == WW_L)) {
/* fall back to fm / am mono */
set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
} else {
@@ -975,15 +984,14 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
break;
}
- if (UNSET != ctl) {
- dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x "
- "[status=0x%x,ctl=0x%x,vol=0x%x]\n",
+ if (ctl != UNSET) {
+ dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x [status=0x%x,ctl=0x%x,vol=0x%x]\n",
mask, ctl, cx_read(AUD_STATUS),
cx_read(AUD_CTL), cx_sread(SHADOW_AUD_VOL_CTL));
cx_andor(AUD_CTL, mask, ctl);
}
- return;
}
+EXPORT_SYMBOL(cx88_set_stereo);
int cx88_audio_thread(void *data)
{
@@ -1012,7 +1020,7 @@ int cx88_audio_thread(void *data)
memset(&t, 0, sizeof(t));
cx88_get_stereo(core, &t);
- if (UNSET != core->audiomode_manual)
+ if (core->audiomode_manual != UNSET)
/* manually set, don't do anything. */
continue;
@@ -1033,8 +1041,10 @@ int cx88_audio_thread(void *data)
case WW_FM:
case WW_I2SADC:
hw_autodetect:
- /* stereo autodetection is supported by hardware so
- we don't need to do it manually. Do nothing. */
+ /*
+ * stereo autodetection is supported by hardware so
+ * we don't need to do it manually. Do nothing.
+ */
break;
}
}
@@ -1042,11 +1052,4 @@ hw_autodetect:
dprintk("cx88: tvaudio thread exiting\n");
return 0;
}
-
-/* ----------------------------------------------------------- */
-
-EXPORT_SYMBOL(cx88_set_tvaudio);
-EXPORT_SYMBOL(cx88_newstation);
-EXPORT_SYMBOL(cx88_set_stereo);
-EXPORT_SYMBOL(cx88_get_stereo);
EXPORT_SYMBOL(cx88_audio_thread);
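Two hunks above replace msleep(10) with usleep_range(10000, 20000). For short delays (roughly 10 us to 20 ms) msleep() can oversleep badly because it rounds up to jiffy granularity, while usleep_range() uses hrtimers and lets the scheduler coalesce wakeups anywhere inside the given window. A small sketch of the pattern (not the driver code itself):

#include <linux/delay.h>

static void wait_for_nicam_status(void)
{
	/* sleep at least 10 ms, at most 20 ms, before polling again */
	usleep_range(10000, 20000);
}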
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index d3237cf8ffa3..2d0ef19e6d65 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -1,22 +1,26 @@
/*
*/
+
+#include "cx88.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include "cx88.h"
-
static unsigned int vbi_debug;
-module_param(vbi_debug,int,0644);
-MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
+module_param(vbi_debug, int, 0644);
+MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
-#define dprintk(level,fmt, arg...) if (vbi_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, dev->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (vbi_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------ */
-int cx8800_vbi_fmt (struct file *file, void *priv,
- struct v4l2_format *f)
+int cx8800_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
@@ -44,8 +48,8 @@ int cx8800_vbi_fmt (struct file *file, void *priv,
}
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf)
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf)
{
struct cx88_core *core = dev->core;
@@ -53,9 +57,9 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
VBI_LINE_LENGTH, buf->risc.dma);
- cx_write(MO_VBOS_CONTROL, ( (1 << 18) | // comb filter delay fixup
- (1 << 15) | // enable vbi capture
- (1 << 11) ));
+ cx_write(MO_VBOS_CONTROL, (1 << 18) | /* comb filter delay fixup */
+ (1 << 15) | /* enable vbi capture */
+ (1 << 11));
/* reset counter */
cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
@@ -66,10 +70,10 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
cx_set(MO_VID_INTMSK, 0x0f0088);
/* enable capture */
- cx_set(VID_CAPTURE_CONTROL,0x18);
+ cx_set(VID_CAPTURE_CONTROL, 0x18);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_VID_DMACNTRL, 0x88);
return 0;
@@ -83,7 +87,7 @@ void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
cx_clear(MO_VID_DMACNTRL, 0x88);
/* disable capture */
- cx_clear(VID_CAPTURE_CONTROL,0x18);
+ cx_clear(VID_CAPTURE_CONTROL, 0x18);
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -99,7 +103,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
return 0;
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
cx8800_start_vbi_dma(dev, q, buf);
return 0;
@@ -108,8 +112,8 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
@@ -121,7 +125,6 @@ static int queue_setup(struct vb2_queue *q,
return 0;
}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -175,7 +178,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
cx8800_start_vbi_dma(dev, q, buf);
- dprintk(2,"[%p/%d] vbi_queue - first active\n",
+ dprintk(2, "[%p/%d] vbi_queue - first active\n",
buf, buf->vb.vb2_buf.index);
} else {
@@ -183,7 +186,7 @@ static void buffer_queue(struct vb2_buffer *vb)
prev = list_entry(q->active.prev, struct cx88_buffer, list);
list_add_tail(&buf->list, &q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk(2,"[%p/%d] buffer_queue - append to active\n",
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
buf, buf->vb.vb2_buf.index);
}
}
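Interspersed with the logging changes, the patch also rewrites constant-first ("Yoda") comparisons and explicit NULL/0 tests into the form checkpatch prefers. A generic before/after sketch with hypothetical variables:

	/* old style: constant on the left, explicit NULL comparison */
	if (NULL == fmt)
		return -EINVAL;
	if (0 == status)
		goto out;

	/* preferred style: variable first, pointers truth-tested directly */
	if (!fmt)
		return -EINVAL;
	if (status == 0)
		goto out;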
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d83eb3b10f54..c7d4e87ccb64 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -19,12 +19,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx88.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -37,7 +35,6 @@
#include <linux/kthread.h>
#include <asm/div64.h>
-#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -58,20 +55,23 @@ module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
-MODULE_PARM_DESC(video_nr,"video device numbers");
-MODULE_PARM_DESC(vbi_nr,"vbi device numbers");
-MODULE_PARM_DESC(radio_nr,"radio device numbers");
+MODULE_PARM_DESC(video_nr, "video device numbers");
+MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
+MODULE_PARM_DESC(radio_nr, "radio device numbers");
static unsigned int video_debug;
-module_param(video_debug,int,0644);
-MODULE_PARM_DESC(video_debug,"enable debug messages [video]");
+module_param(video_debug, int, 0644);
+MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
static unsigned int irq_debug;
-module_param(irq_debug,int,0644);
-MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
+module_param(irq_debug, int, 0644);
+MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
-#define dprintk(level,fmt, arg...) if (video_debug >= level) \
- printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (video_debug >= level) \
+ printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \
+ __func__, ##arg); \
+} while (0)
/* ------------------------------------------------------------------- */
/* static data */
@@ -83,55 +83,56 @@ static const struct cx8800_fmt formats[] = {
.cxformat = ColorFormatY8,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.cxformat = ColorFormatRGB15,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.cxformat = ColorFormatRGB15 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.cxformat = ColorFormatRGB16,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.cxformat = ColorFormatRGB16 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.cxformat = ColorFormatRGB24,
.depth = 24,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.cxformat = ColorFormatRGB32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
- .cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP,
+ .cxformat = ColorFormatRGB32 | ColorFormatBSWAP |
+ ColorFormatWSWAP,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.cxformat = ColorFormatYUY2,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
- },{
+ }, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.cxformat = ColorFormatYUY2 | ColorFormatBSWAP,
@@ -140,13 +141,13 @@ static const struct cx8800_fmt formats[] = {
},
};
-static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
+static const struct cx8800_fmt *format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
- return formats+i;
+ return formats + i;
return NULL;
}
@@ -180,7 +181,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_CONTR_BRIGHT,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
.id = V4L2_CID_CONTRAST,
.minimum = 0,
.maximum = 0xff,
@@ -190,7 +191,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_CONTR_BRIGHT,
.mask = 0xff00,
.shift = 8,
- },{
+ }, {
.id = V4L2_CID_HUE,
.minimum = 0,
.maximum = 0xff,
@@ -200,7 +201,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.reg = MO_HUE,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
/* strictly, this only describes only U saturation.
* V saturation is handled specially through code.
*/
@@ -220,8 +221,10 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
.step = 1,
.default_value = 0x0,
.off = 0,
- /* NOTE: the value is converted and written to both even
- and odd registers in the code */
+ /*
+ * NOTE: the value is converted and written to both even
+ * and odd registers in the code
+ */
.reg = MO_FILTER_ODD,
.mask = 7 << 7,
.shift = 7,
@@ -265,7 +268,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
.sreg = SHADOW_AUD_VOL_CTL,
.mask = (1 << 6),
.shift = 6,
- },{
+ }, {
.id = V4L2_CID_AUDIO_VOLUME,
.minimum = 0,
.maximum = 0x3f,
@@ -275,7 +278,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
.sreg = SHADOW_AUD_VOL_CTL,
.mask = 0x3f,
.shift = 0,
- },{
+ }, {
.id = V4L2_CID_AUDIO_BALANCE,
.minimum = 0,
.maximum = 0x7f,
@@ -299,10 +302,10 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
{
/* struct cx88_core *core = dev->core; */
- dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
+ dprintk(1, "video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
input, INPUT(input).vmux,
- INPUT(input).gpio0,INPUT(input).gpio1,
- INPUT(input).gpio2,INPUT(input).gpio3);
+ INPUT(input).gpio0, INPUT(input).gpio1,
+ INPUT(input).gpio2, INPUT(input).gpio3);
core->input = input;
cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14);
cx_write(MO_GP3_IO, INPUT(input).gpio3);
@@ -325,19 +328,25 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
break;
}
- /* if there are audioroutes defined, we have an external
- ADC to deal with audio */
+ /*
+ * if there are audioroutes defined, we have an external
+ * ADC to deal with audio
+ */
if (INPUT(input).audioroute) {
- /* The wm8775 module has the "2" route hardwired into
- the initialization. Some boards may use different
- routes for different inputs. HVR-1300 surely does */
+ /*
+ * The wm8775 module has the "2" route hardwired into
+ * the initialization. Some boards may use different
+ * routes for different inputs. HVR-1300 surely does
+ */
if (core->sd_wm8775) {
call_all(core, audio, s_routing,
INPUT(input).audioroute, 0, 0);
}
- /* cx2388's C-ADC is connected to the tuner only.
- When used with S-Video, that ADC is busy dealing with
- chroma, so an external must be used for baseband audio */
+ /*
+ * cx2388's C-ADC is connected to the tuner only.
+ * When used with S-Video, that ADC is busy dealing with
+ * chroma, so an external ADC must be used for baseband audio
+ */
if (INPUT(input).type != CX88_VMUX_TELEVISION &&
INPUT(input).type != CX88_VMUX_CABLE) {
/* "I2S ADC mode" */
@@ -369,26 +378,27 @@ static int start_video_dma(struct cx8800_dev *dev,
cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma);
/* reset counter */
- cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
+ cx_write(MO_VIDY_GPCNTRL, GP_COUNT_CONTROL_RESET);
q->count = 0;
/* enable irqs */
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
- /* Enables corresponding bits at PCI_INT_STAT:
- bits 0 to 4: video, audio, transport stream, VIP, Host
- bit 7: timer
- bits 8 and 9: DMA complete for: SRC, DST
- bits 10 and 11: BERR signal asserted for RISC: RD, WR
- bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
+ /*
+ * Enables corresponding bits at PCI_INT_STAT:
+ * bits 0 to 4: video, audio, transport stream, VIP, Host
+ * bit 7: timer
+ * bits 8 and 9: DMA complete for: SRC, DST
+ * bits 10 and 11: BERR signal asserted for RISC: RD, WR
+ * bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
*/
cx_set(MO_VID_INTMSK, 0x0f0011);
/* enable capture */
- cx_set(VID_CAPTURE_CONTROL,0x06);
+ cx_set(VID_CAPTURE_CONTROL, 0x06);
/* start dma */
- cx_set(MO_DEV_CNTRL2, (1<<5));
+ cx_set(MO_DEV_CNTRL2, (1 << 5));
cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */
return 0;
@@ -403,7 +413,7 @@ static int stop_video_dma(struct cx8800_dev *dev)
cx_clear(MO_VID_DMACNTRL, 0x11);
/* disable capture */
- cx_clear(VID_CAPTURE_CONTROL,0x06);
+ cx_clear(VID_CAPTURE_CONTROL, 0x06);
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -414,12 +424,11 @@ static int stop_video_dma(struct cx8800_dev *dev)
static int restart_video_queue(struct cx8800_dev *dev,
struct cx88_dmaqueue *q)
{
- struct cx88_core *core = dev->core;
struct cx88_buffer *buf;
if (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, list);
- dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+ dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.vb2_buf.index);
start_video_dma(dev, q, buf);
}
@@ -430,8 +439,8 @@ static int restart_video_queue(struct cx8800_dev *dev,
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
struct cx88_core *core = dev->core;
@@ -488,7 +497,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
core->height >> 1);
break;
}
- dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+ dprintk(2,
+ "[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index,
core->width, core->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
@@ -513,7 +523,6 @@ static void buffer_queue(struct vb2_buffer *vb)
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
struct cx88_buffer *prev;
- struct cx88_core *core = dev->core;
struct cx88_dmaqueue *q = &dev->vidq;
/* add jump to start */
@@ -523,7 +532,7 @@ static void buffer_queue(struct vb2_buffer *vb)
if (list_empty(&q->active)) {
list_add_tail(&buf->list, &q->active);
- dprintk(2,"[%p/%d] buffer_queue - first active\n",
+ dprintk(2, "[%p/%d] buffer_queue - first active\n",
buf, buf->vb.vb2_buf.index);
} else {
@@ -596,7 +605,7 @@ static int radio_open(struct file *file)
if (core->board.radio.audioroute) {
if (core->sd_wm8775) {
call_all(core, audio, s_routing,
- core->board.radio.audioroute, 0, 0);
+ core->board.radio.audioroute, 0, 0);
}
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
@@ -650,9 +659,10 @@ static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl)
value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
- dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
- mask, cc->sreg ? " [shadowed]" : "");
+ dprintk(1,
+ "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
if (cc->sreg)
cx_sandor(cc->sreg, cc->reg, mask, value);
else
@@ -665,7 +675,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
struct cx88_core *core =
container_of(ctrl->handler, struct cx88_core, audio_hdl);
const struct cx88_ctrl *cc = ctrl->priv;
- u32 value,mask;
+ u32 value, mask;
/* Pass changes onto any WM8775 */
if (core->sd_wm8775) {
@@ -688,7 +698,8 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
mask = cc->mask;
switch (ctrl->id) {
case V4L2_CID_AUDIO_BALANCE:
- value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40);
+ value = (ctrl->val < 0x40) ?
+ (0x7f - ctrl->val) : (ctrl->val - 0x40);
break;
case V4L2_CID_AUDIO_VOLUME:
value = 0x3f - (ctrl->val & 0x3f);
@@ -697,9 +708,10 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
- dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
- mask, cc->sreg ? " [shadowed]" : "");
+ dprintk(1,
+ "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
if (cc->sreg)
cx_sandor(cc->sreg, cc->reg, mask, value);
else
@@ -711,7 +723,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
/* VIDEO IOCTLS */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -729,7 +741,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -738,7 +750,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
unsigned int maxw, maxh;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (NULL == fmt)
+ if (!fmt)
return -EINVAL;
maxw = norm_maxw(core->tvnorm);
@@ -775,13 +787,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- int err = vidioc_try_fmt_vid_cap (file,priv,f);
+ int err = vidioc_try_fmt_vid_cap(file, priv, f);
- if (0 != err)
+ if (err != 0)
return err;
if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq))
return -EBUSY;
@@ -795,13 +807,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
void cx88_querycap(struct file *file, struct cx88_core *core,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
strlcpy(cap->card, core->board.name, sizeof(cap->card));
cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- if (UNSET != core->board.tuner_type)
+ if (core->board.tuner_type != UNSET)
cap->device_caps |= V4L2_CAP_TUNER;
switch (vdev->vfl_type) {
case VFL_TYPE_RADIO:
@@ -822,7 +834,7 @@ void cx88_querycap(struct file *file, struct cx88_core *core,
EXPORT_SYMBOL(cx88_querycap);
static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -833,13 +845,13 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
- strlcpy(f->description,formats[f->index].name,sizeof(f->description));
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
@@ -863,45 +875,46 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
}
/* only one input in this sample driver */
-int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
+int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i)
{
static const char * const iname[] = {
- [ CX88_VMUX_COMPOSITE1 ] = "Composite1",
- [ CX88_VMUX_COMPOSITE2 ] = "Composite2",
- [ CX88_VMUX_COMPOSITE3 ] = "Composite3",
- [ CX88_VMUX_COMPOSITE4 ] = "Composite4",
- [ CX88_VMUX_SVIDEO ] = "S-Video",
- [ CX88_VMUX_TELEVISION ] = "Television",
- [ CX88_VMUX_CABLE ] = "Cable TV",
- [ CX88_VMUX_DVB ] = "DVB",
- [ CX88_VMUX_DEBUG ] = "for debug only",
+ [CX88_VMUX_COMPOSITE1] = "Composite1",
+ [CX88_VMUX_COMPOSITE2] = "Composite2",
+ [CX88_VMUX_COMPOSITE3] = "Composite3",
+ [CX88_VMUX_COMPOSITE4] = "Composite4",
+ [CX88_VMUX_SVIDEO] = "S-Video",
+ [CX88_VMUX_TELEVISION] = "Television",
+ [CX88_VMUX_CABLE] = "Cable TV",
+ [CX88_VMUX_DVB] = "DVB",
+ [CX88_VMUX_DEBUG] = "for debug only",
};
unsigned int n = i->index;
if (n >= 4)
return -EINVAL;
- if (0 == INPUT(n).type)
+ if (!INPUT(n).type)
return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(i->name,iname[INPUT(n).type]);
- if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
- (CX88_VMUX_CABLE == INPUT(n).type)) {
+ strcpy(i->name, iname[INPUT(n).type]);
+ if ((INPUT(n).type == CX88_VMUX_TELEVISION) ||
+ (INPUT(n).type == CX88_VMUX_CABLE))
i->type = V4L2_INPUT_TYPE_TUNER;
- }
+
i->std = CX88_NORMS;
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
-static int vidioc_enum_input (struct file *file, void *priv,
- struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- return cx88_enum_input (core,i);
+
+ return cx88_enum_input(core, i);
}
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -910,31 +923,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
- if (0 == INPUT(i).type)
+ if (!INPUT(i).type)
return -EINVAL;
cx88_newstation(core);
- cx88_video_mux(core,i);
+ cx88_video_mux(core, i);
return 0;
}
-static int vidioc_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
u32 reg;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
strcpy(t->name, "Television");
@@ -942,34 +955,34 @@ static int vidioc_g_tuner (struct file *file, void *priv,
t->rangehigh = 0xffffffffUL;
call_all(core, tuner, g_tuner, t);
- cx88_get_stereo(core ,t);
+ cx88_get_stereo(core, t);
reg = cx_read(MO_DEVICE_STATUS);
- t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+ t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
return 0;
}
-static int vidioc_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (UNSET == core->board.tuner_type)
+ if (core->board.tuner_type == UNSET)
return -EINVAL;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
cx88_set_stereo(core, t->audmode, 1);
return 0;
}
-static int vidioc_g_frequency (struct file *file, void *priv,
- struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (f->tuner)
return -EINVAL;
@@ -981,12 +994,12 @@ static int vidioc_g_frequency (struct file *file, void *priv,
return 0;
}
-int cx88_set_freq (struct cx88_core *core,
- const struct v4l2_frequency *f)
+int cx88_set_freq(struct cx88_core *core,
+ const struct v4l2_frequency *f)
{
struct v4l2_frequency new_freq = *f;
- if (unlikely(UNSET == core->board.tuner_type))
+ if (unlikely(core->board.tuner_type == UNSET))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
@@ -997,15 +1010,15 @@ int cx88_set_freq (struct cx88_core *core,
core->freq = new_freq.frequency;
/* When changing channels it is required to reset TVAUDIO */
- msleep (10);
+ usleep_range(10000, 20000);
cx88_set_tvaudio(core);
return 0;
}
EXPORT_SYMBOL(cx88_set_freq);
-static int vidioc_s_frequency (struct file *file, void *priv,
- const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1014,8 +1027,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int vidioc_g_register (struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int vidioc_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1026,8 +1039,8 @@ static int vidioc_g_register (struct file *file, void *fh,
return 0;
}
-static int vidioc_s_register (struct file *file, void *fh,
- const struct v4l2_dbg_register *reg)
+static int vidioc_s_register(struct file *file, void *fh,
+ const struct v4l2_dbg_register *reg)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1041,8 +1054,8 @@ static int vidioc_s_register (struct file *file, void *fh,
/* RADIO ESPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
-static int radio_g_tuner (struct file *file, void *priv,
- struct v4l2_tuner *t)
+static int radio_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
@@ -1056,13 +1069,13 @@ static int radio_g_tuner (struct file *file, void *priv,
return 0;
}
-static int radio_s_tuner (struct file *file, void *priv,
- const struct v4l2_tuner *t)
+static int radio_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *t)
{
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- if (0 != t->index)
+ if (t->index != 0)
return -EINVAL;
call_all(core, tuner, s_tuner, t);
@@ -1090,13 +1103,13 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
return;
cx_write(MO_VID_INTSTAT, status);
if (irq_debug || (status & mask & ~0xff))
- cx88_print_irqbits(core->name, "irq vid",
+ cx88_print_irqbits("irq vid",
cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs),
status, mask);
/* risc op code error */
if (status & (1 << 16)) {
- printk(KERN_WARNING "%s/0: video risc op code error\n",core->name);
+ pr_warn("video risc op code error\n");
cx_clear(MO_VID_DMACNTRL, 0x11);
cx_clear(VID_CAPTURE_CONTROL, 0x06);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
@@ -1129,20 +1142,19 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
for (loop = 0; loop < 10; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_VIDINT);
- if (0 == status)
+ if (status == 0)
goto out;
cx_write(MO_PCI_INTSTAT, status);
handled = 1;
if (status & core->pci_irqmask)
- cx88_core_irq(core,status);
+ cx88_core_irq(core, status);
if (status & PCI_INT_VIDINT)
cx8800_vid_irq(dev);
}
- if (10 == loop) {
- printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
- core->name);
- cx_write(MO_PCI_INTMSK,0);
+ if (loop == 10) {
+ pr_warn("irq loop -- clearing mask\n");
+ cx_write(MO_PCI_INTMSK, 0);
}
out:
@@ -1152,8 +1164,7 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
/* ----------------------------------------------------------- */
/* exported stuff */
-static const struct v4l2_file_operations video_fops =
-{
+static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -1195,7 +1206,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static const struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
- .ioctl_ops = &video_ioctl_ops,
+ .ioctl_ops = &video_ioctl_ops,
.tvnorms = CX88_NORMS,
};
@@ -1232,8 +1243,7 @@ static const struct video_device cx8800_vbi_template = {
.tvnorms = CX88_NORMS,
};
-static const struct v4l2_file_operations radio_fops =
-{
+static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = radio_open,
.poll = v4l2_ctrl_poll,
@@ -1258,7 +1268,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
static const struct video_device cx8800_radio_template = {
.name = "cx8800-radio",
.fops = &radio_fops,
- .ioctl_ops = &radio_ioctl_ops,
+ .ioctl_ops = &radio_ioctl_ops,
};
static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = {
@@ -1287,8 +1297,8 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
int err;
int i;
- dev = kzalloc(sizeof(*dev),GFP_KERNEL);
- if (NULL == dev)
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
/* pci init */
@@ -1298,7 +1308,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
goto fail_free;
}
core = cx88_core_get(dev->pci);
- if (NULL == core) {
+ if (!core) {
err = -EINVAL;
goto fail_free;
}
@@ -1307,15 +1317,15 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", core->name,
- pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+ pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
- printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
+ pr_err("Oops: no 32bit PCI DMA ???\n");
goto fail_core;
}
@@ -1332,8 +1342,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = request_irq(pci_dev->irq, cx8800_irq,
IRQF_SHARED, core->name, dev);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't get IRQ %d\n",
- core->name,pci_dev->irq);
+ pr_err("can't get IRQ %d\n", pci_dev->irq);
goto fail_core;
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
@@ -1343,8 +1352,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
struct v4l2_ctrl *vc;
vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops,
- cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
- if (vc == NULL) {
+ cc->id, cc->minimum, cc->maximum,
+ cc->step, cc->default_value);
+ if (!vc) {
err = core->audio_hdl.error;
goto fail_core;
}
@@ -1356,8 +1366,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
struct v4l2_ctrl *vc;
vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops,
- cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
- if (vc == NULL) {
+ cc->id, cc->minimum, cc->maximum,
+ cc->step, cc->default_value);
+ if (!vc) {
err = core->video_hdl.error;
goto fail_core;
}
@@ -1383,18 +1394,20 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core->wm8775_data.is_nova_s = false;
sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap,
- &wm8775_info, NULL);
- if (sd != NULL) {
+ &wm8775_info, NULL);
+ if (sd) {
core->sd_wm8775 = sd;
sd->grp_id = WM8775_GID;
}
}
if (core->board.audio_chip == CX88_AUDIO_TVAUDIO) {
- /* This probes for a tda9874 as is used on some
- Pixelview Ultra boards. */
+ /*
+ * This probes for a tda9874 as is used on some
+ * Pixelview Ultra boards.
+ */
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+ "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
}
switch (core->boardnr) {
@@ -1470,12 +1483,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register video device\n",
- core->name);
+ pr_err("can't register video device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
- core->name, video_device_node_name(&dev->video_dev));
+ pr_info("registered device %s [v4l2]\n",
+ video_device_node_name(&dev->video_dev));
cx88_vdev_init(core, dev->pci, &dev->vbi_dev,
&cx8800_vbi_template, "vbi");
@@ -1484,12 +1496,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register vbi device\n",
- core->name);
+ pr_err("can't register vbi device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(&dev->vbi_dev));
+ pr_info("registered device %s\n",
+ video_device_node_name(&dev->vbi_dev));
if (core->board.radio.type == CX88_RADIO) {
cx88_vdev_init(core, dev->pci, &dev->radio_dev,
@@ -1499,21 +1510,21 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
- printk(KERN_ERR "%s/0: can't register radio device\n",
- core->name);
+ pr_err("can't register radio device\n");
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device %s\n",
- core->name, video_device_node_name(&dev->radio_dev));
+ pr_info("registered device %s\n",
+ video_device_node_name(&dev->radio_dev));
}
/* start tvaudio thread */
if (core->board.tuner_type != UNSET) {
- core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
+ core->kthread = kthread_run(cx88_audio_thread,
+ core, "cx88 tvaudio");
if (IS_ERR(core->kthread)) {
err = PTR_ERR(core->kthread);
- printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n",
- core->name, err);
+ pr_err("failed to create cx88 audio thread, err=%d\n",
+ err);
}
}
mutex_unlock(&core->lock);
@@ -1526,7 +1537,7 @@ fail_unreg:
mutex_unlock(&core->lock);
fail_core:
core->v4ldev = NULL;
- cx88_core_put(core,dev->pci);
+ cx88_core_put(core, dev->pci);
fail_free:
kfree(dev);
return err;
@@ -1557,7 +1568,7 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
core->v4ldev = NULL;
/* free memory */
- cx88_core_put(core,dev->pci);
+ cx88_core_put(core, dev->pci);
kfree(dev);
}
@@ -1571,11 +1582,11 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
/* stop video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->vidq.active)) {
- printk("%s/0: suspend video\n", core->name);
+ pr_info("suspend video\n");
stop_video_dma(dev);
}
if (!list_empty(&dev->vbiq.active)) {
- printk("%s/0: suspend vbi\n", core->name);
+ pr_info("suspend vbi\n");
cx8800_stop_vbi_dma(dev);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -1586,7 +1597,8 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
cx88_shutdown(core);
pci_save_state(pci_dev);
- if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+ if (pci_set_power_state(pci_dev,
+ pci_choose_state(pci_dev, state)) != 0) {
pci_disable_device(pci_dev);
dev->state.disabled = 1;
}
@@ -1601,18 +1613,17 @@ static int cx8800_resume(struct pci_dev *pci_dev)
int err;
if (dev->state.disabled) {
- err=pci_enable_device(pci_dev);
+ err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR "%s/0: can't enable device\n",
- core->name);
+ pr_err("can't enable device\n");
return err;
}
dev->state.disabled = 0;
}
- err= pci_set_power_state(pci_dev, PCI_D0);
+ err = pci_set_power_state(pci_dev, PCI_D0);
if (err) {
- printk(KERN_ERR "%s/0: can't set power state\n", core->name);
+ pr_err("can't set power state\n");
pci_disable_device(pci_dev);
dev->state.disabled = 1;
@@ -1630,12 +1641,12 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* restart video+vbi capture */
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&dev->vidq.active)) {
- printk("%s/0: resume video\n", core->name);
- restart_video_queue(dev,&dev->vidq);
+ pr_info("resume video\n");
+ restart_video_queue(dev, &dev->vidq);
}
if (!list_empty(&dev->vbiq.active)) {
- printk("%s/0: resume vbi\n", core->name);
- cx8800_restart_vbi_queue(dev,&dev->vbiq);
+ pr_info("resume vbi\n");
+ cx8800_restart_vbi_queue(dev, &dev->vbiq);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -1651,7 +1662,7 @@ static const struct pci_device_id cx8800_pci_tbl[] = {
.device = 0x8800,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- },{
+ }, {
/* --- end of list --- */
}
};
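
Note on the msleep() change above: the kernel's timers-howto recommends usleep_range() over msleep() for delays in the 1-20 ms range, because msleep() is jiffy-based and can oversleep badly for small values. A minimal illustrative sketch, not part of this patch (the function name is hypothetical):

#include <linux/delay.h>

/* Illustrative only: give the tuner ~10 ms to settle after a channel change. */
static void example_tvaudio_settle(void)
{
	/*
	 * usleep_range(min, max) uses hrtimers, so the 10-20 ms window is
	 * honoured far more precisely than msleep(10), which may round up
	 * to the next jiffy boundary (often 20 ms at HZ=100).
	 */
	usleep_range(10000, 20000);
}
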
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.c b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
index deede6e25d94..92876de3841c 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
@@ -1,35 +1,28 @@
/*
+ * cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
+ * DNTV Live! DVB-T Pro (VP-3054), wired as:
+ * GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
- DNTV Live! DVB-T Pro (VP-3054), wired as:
- GPIO[0] -> SCL, GPIO[1] -> SDA
-
- (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+#include "cx88.h"
+#include "cx88-vp3054-i2c.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
-#include <asm/io.h>
-
-#include "cx88.h"
-#include "cx88-vp3054-i2c.h"
+#include <linux/io.h>
MODULE_DESCRIPTION("driver for cx2388x VP3054 design");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -114,7 +107,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return 0;
vp3054_i2c = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
- if (vp3054_i2c == NULL)
+ if (!vp3054_i2c)
return -ENOMEM;
dev->vp3054 = vp3054_i2c;
@@ -128,12 +121,12 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
i2c_set_adapdata(&vp3054_i2c->adap, dev);
vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
- vp3054_bit_setscl(dev,1);
- vp3054_bit_setsda(dev,1);
+ vp3054_bit_setscl(dev, 1);
+ vp3054_bit_setsda(dev, 1);
rc = i2c_bit_add_bus(&vp3054_i2c->adap);
- if (0 != rc) {
- printk("%s: vp3054_i2c register FAILED\n", core->name);
+ if (rc != 0) {
+ pr_err("vp3054_i2c register FAILED\n");
kfree(dev->vp3054);
dev->vp3054 = NULL;
@@ -141,18 +134,17 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return rc;
}
+EXPORT_SYMBOL(vp3054_i2c_probe);
void vp3054_i2c_remove(struct cx8802_dev *dev)
{
struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
- if (vp3054_i2c == NULL ||
+ if (!vp3054_i2c ||
dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
return;
i2c_del_adapter(&vp3054_i2c->adap);
kfree(vp3054_i2c);
}
-
-EXPORT_SYMBOL(vp3054_i2c_probe);
EXPORT_SYMBOL(vp3054_i2c_remove);
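
For context, the VP-3054 secondary bus shown above is bit-banged over two cx88 GPIO pins through i2c-algo-bit. A minimal sketch of that registration pattern, assuming the standard i2c-algo-bit API; all ex_-prefixed names are hypothetical and only stand in for the driver's GPIO helpers:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* Hypothetical GPIO callbacks; the real driver toggles cx88 GPIO[0]/GPIO[1]. */
static void ex_setscl(void *data, int state) { /* drive SCL high/low */ }
static void ex_setsda(void *data, int state) { /* drive SDA high/low */ }
static int  ex_getscl(void *data) { return 1; /* read SCL back */ }
static int  ex_getsda(void *data) { return 1; /* read SDA back */ }

static struct i2c_algo_bit_data ex_algo = {
	.setsda  = ex_setsda,
	.setscl  = ex_setscl,
	.getsda  = ex_getsda,
	.getscl  = ex_getscl,
	.udelay  = 16,		/* slow bus, roughly 30 kHz */
	.timeout = 200,
};

static struct i2c_adapter ex_adap = {
	.owner     = THIS_MODULE,
	.name      = "example bit-banged bus",
	.algo_data = &ex_algo,
};

/* i2c_bit_add_bus() plugs the bit-banging algorithm into the adapter. */
static int ex_register(void)
{
	return i2c_bit_add_bus(&ex_adap);
}
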
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.h b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
index 95d0c60a35e1..ec19bea8f1e2 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
@@ -1,26 +1,20 @@
/*
-
- cx88-vp3054-i2c.h -- support for the secondary I2C bus of the
- DNTV Live! DVB-T Pro (VP-3054), wired as:
- GPIO[0] -> SCL, GPIO[1] -> SDA
-
- (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+ * cx88-vp3054-i2c.h -- support for the secondary I2C bus of the
+ * DNTV Live! DVB-T Pro (VP-3054), wired as:
+ * GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/* ----------------------------------------------------------------------- */
struct vp3054_i2c_state {
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ecd4b7bece99..115414cf520f 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -1,5 +1,4 @@
/*
- *
* v4l2 device driver for cx2388x based TV cards
*
* (c) 2003,04 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
@@ -13,12 +12,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifndef CX88_H
+#define CX88_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -53,7 +53,7 @@
/* defines and enums */
/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM/LC */
-#define CX88_NORMS (V4L2_STD_ALL \
+#define CX88_NORMS (V4L2_STD_ALL \
& ~V4L2_STD_PAL_H \
& ~V4L2_STD_NTSC_M_KR \
& ~V4L2_STD_SECAM_LC)
@@ -98,7 +98,6 @@ static inline unsigned int norm_maxw(v4l2_std_id norm)
return 720;
}
-
static inline unsigned int norm_maxh(v4l2_std_id norm)
{
return (norm & V4L2_STD_525_60) ? 480 : 576;
@@ -140,6 +139,7 @@ struct sram_channel {
u32 cnt1_reg;
u32 cnt2_reg;
};
+
extern const struct sram_channel cx88_sram_channels[];
/* ----------------------------------------------------------- */
@@ -361,12 +361,12 @@ struct cx88_core {
u32 i2c_state, i2c_rc;
/* config info -- analog */
- struct v4l2_device v4l2_dev;
+ struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler video_hdl;
struct v4l2_ctrl *chroma_agc;
struct v4l2_ctrl_handler audio_hdl;
struct v4l2_subdev *sd_wm8775;
- struct i2c_client *i2c_rtc;
+ struct i2c_client *i2c_rtc;
unsigned int boardnr;
struct cx88_board board;
@@ -383,8 +383,8 @@ struct cx88_core {
/* state info */
struct task_struct *kthread;
v4l2_std_id tvnorm;
- unsigned width, height;
- unsigned field;
+ unsigned int width, height;
+ unsigned int field;
enum cx88_tvaudio tvaudio;
u32 audiomode_manual;
u32 audiomode_current;
@@ -427,7 +427,8 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
if (!core->i2c_rc) { \
if (core->gate_ctrl) \
core->gate_ctrl(core, 1); \
- v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
+ v4l2_device_call_all(&core->v4l2_dev, \
+ grpid, o, f, ##args); \
if (core->gate_ctrl) \
core->gate_ctrl(core, 0); \
} \
@@ -438,31 +439,31 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
#define WM8775_GID (1 << 0)
#define wm8775_s_ctrl(core, id, val) \
- do { \
- struct v4l2_ctrl *ctrl_ = \
- v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
- if (ctrl_ && !core->i2c_rc) { \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 1); \
- v4l2_ctrl_s_ctrl(ctrl_, val); \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 0); \
- } \
+ do { \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ v4l2_ctrl_s_ctrl(ctrl_, val); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
} while (0)
#define wm8775_g_ctrl(core, id) \
- ({ \
- struct v4l2_ctrl *ctrl_ = \
- v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
- s32 val = 0; \
- if (ctrl_ && !core->i2c_rc) { \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 1); \
- val = v4l2_ctrl_g_ctrl(ctrl_); \
- if (core->gate_ctrl) \
- core->gate_ctrl(core, 0); \
- } \
- val; \
+ ({ \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+ s32 val = 0; \
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ val = v4l2_ctrl_g_ctrl(ctrl_); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
+ val; \
})
/* ----------------------------------------------------------- */
@@ -484,7 +485,7 @@ struct cx8800_dev {
/* pci i/o */
struct pci_dev *pci;
- unsigned char pci_rev,pci_lat;
+ unsigned char pci_rev, pci_lat;
const struct cx8800_fmt *fmt;
@@ -504,7 +505,6 @@ struct cx8800_dev {
/* function 1: audio/alsa stuff */
/* =============> moved to cx88-alsa.c <====================== */
-
/* ----------------------------------------------------------- */
/* function 2: mpeg stuff */
@@ -547,7 +547,7 @@ struct cx8802_dev {
/* pci i/o */
struct pci_dev *pci;
- unsigned char pci_rev,pci_lat;
+ unsigned char pci_rev, pci_lat;
/* dma queues */
struct cx88_dmaqueue mpegq;
@@ -566,6 +566,7 @@ struct cx8802_dev {
/* mpeg params */
struct cx2341x_handler cxhdl;
+
#endif
#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
@@ -588,40 +589,42 @@ struct cx8802_dev {
/* ----------------------------------------------------------- */
-#define cx_read(reg) readl(core->lmmio + ((reg)>>2))
-#define cx_write(reg,value) writel((value), core->lmmio + ((reg)>>2))
-#define cx_writeb(reg,value) writeb((value), core->bmmio + (reg))
+#define cx_read(reg) readl(core->lmmio + ((reg) >> 2))
+#define cx_write(reg, value) writel((value), core->lmmio + ((reg) >> 2))
+#define cx_writeb(reg, value) writeb((value), core->bmmio + (reg))
-#define cx_andor(reg,mask,value) \
- writel((readl(core->lmmio+((reg)>>2)) & ~(mask)) |\
- ((value) & (mask)), core->lmmio+((reg)>>2))
-#define cx_set(reg,bit) cx_andor((reg),(bit),(bit))
-#define cx_clear(reg,bit) cx_andor((reg),(bit),0)
+#define cx_andor(reg, mask, value) \
+ writel((readl(core->lmmio + ((reg) >> 2)) & ~(mask)) |\
+ ((value) & (mask)), core->lmmio + ((reg) >> 2))
+#define cx_set(reg, bit) cx_andor((reg), (bit), (bit))
+#define cx_clear(reg, bit) cx_andor((reg), (bit), 0)
#define cx_wait(d) { if (need_resched()) schedule(); else udelay(d); }
/* shadow registers */
#define cx_sread(sreg) (core->shadow[sreg])
-#define cx_swrite(sreg,reg,value) \
- (core->shadow[sreg] = value, \
- writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
-#define cx_sandor(sreg,reg,mask,value) \
- (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | ((value) & (mask)), \
- writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
+#define cx_swrite(sreg, reg, value) \
+ (core->shadow[sreg] = value, \
+ writel(core->shadow[sreg], core->lmmio + ((reg) >> 2)))
+#define cx_sandor(sreg, reg, mask, value) \
+ (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | \
+ ((value) & (mask)), \
+ writel(core->shadow[sreg], \
+ core->lmmio + ((reg) >> 2)))
/* ----------------------------------------------------------- */
/* cx88-core.c */
extern unsigned int cx88_core_debug;
-extern void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
- int len, u32 bits, u32 mask);
+void cx88_print_irqbits(const char *tag, const char *strings[],
+ int len, u32 bits, u32 mask);
-extern int cx88_core_irq(struct cx88_core *core, u32 status);
-extern void cx88_wakeup(struct cx88_core *core,
- struct cx88_dmaqueue *q, u32 count);
-extern void cx88_shutdown(struct cx88_core *core);
-extern int cx88_reset(struct cx88_core *core);
+int cx88_core_irq(struct cx88_core *core, u32 status);
+void cx88_wakeup(struct cx88_core *core,
+ struct cx88_dmaqueue *q, u32 count);
+void cx88_shutdown(struct cx88_core *core);
+int cx88_reset(struct cx88_core *core);
extern int
cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
@@ -633,43 +636,37 @@ cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
unsigned int lines, unsigned int lpi);
-extern void cx88_risc_disasm(struct cx88_core *core,
- struct cx88_riscmem *risc);
-extern int cx88_sram_channel_setup(struct cx88_core *core,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc);
-extern void cx88_sram_channel_dump(struct cx88_core *core,
- const struct sram_channel *ch);
-
-extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
- unsigned int height, enum v4l2_field field);
-extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
-
-extern void cx88_vdev_init(struct cx88_core *core,
- struct pci_dev *pci,
- struct video_device *vfd,
- const struct video_device *template_,
- const char *type);
-extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
-extern void cx88_core_put(struct cx88_core *core,
- struct pci_dev *pci);
-
-extern int cx88_start_audio_dma(struct cx88_core *core);
-extern int cx88_stop_audio_dma(struct cx88_core *core);
-
+void cx88_risc_disasm(struct cx88_core *core,
+ struct cx88_riscmem *risc);
+int cx88_sram_channel_setup(struct cx88_core *core,
+ const struct sram_channel *ch,
+ unsigned int bpl, u32 risc);
+void cx88_sram_channel_dump(struct cx88_core *core,
+ const struct sram_channel *ch);
+
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+ unsigned int height, enum v4l2_field field);
+int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
+
+void cx88_vdev_init(struct cx88_core *core,
+ struct pci_dev *pci,
+ struct video_device *vfd,
+ const struct video_device *template_,
+ const char *type);
+struct cx88_core *cx88_core_get(struct pci_dev *pci);
+void cx88_core_put(struct cx88_core *core,
+ struct pci_dev *pci);
+
+int cx88_start_audio_dma(struct cx88_core *core);
+int cx88_stop_audio_dma(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-vbi.c */
/* Can be used as g_vbi_fmt, try_vbi_fmt and s_vbi_fmt */
-int cx8800_vbi_fmt (struct file *file, void *priv,
- struct v4l2_format *f);
+int cx8800_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
-/*
-int cx8800_start_vbi_dma(struct cx8800_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf);
-*/
void cx8800_stop_vbi_dma(struct cx8800_dev *dev);
int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
@@ -678,17 +675,16 @@ extern const struct vb2_ops cx8800_vbi_qops;
/* ----------------------------------------------------------- */
/* cx88-i2c.c */
-extern int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
-
+int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
/* ----------------------------------------------------------- */
/* cx88-cards.c */
-extern int cx88_tuner_callback(void *dev, int component, int command, int arg);
-extern int cx88_get_resources(const struct cx88_core *core,
- struct pci_dev *pci);
-extern struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
-extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
+int cx88_tuner_callback(void *dev, int component, int command, int arg);
+int cx88_get_resources(const struct cx88_core *core,
+ struct pci_dev *pci);
+struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
+void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
/* ----------------------------------------------------------- */
/* cx88-tvaudio.c */
@@ -703,7 +699,8 @@ int cx8802_register_driver(struct cx8802_driver *drv);
int cx8802_unregister_driver(struct cx8802_driver *drv);
/* Caller must hold core->lock */
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+ enum cx88_board_type btype);
/* ----------------------------------------------------------- */
/* cx88-dsp.c */
@@ -718,18 +715,18 @@ int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
int cx88_ir_start(struct cx88_core *core);
void cx88_ir_stop(struct cx88_core *core);
-extern void cx88_i2c_init_ir(struct cx88_core *core);
+void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
- struct cx88_buffer *buf);
+ struct cx88_buffer *buf);
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf);
void cx8802_cancel_buffers(struct cx8802_dev *dev);
int cx8802_start_dma(struct cx8802_dev *dev,
- struct cx88_dmaqueue *q,
- struct cx88_buffer *buf);
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf);
/* ----------------------------------------------------------- */
/* cx88-video.c*/
@@ -737,4 +734,6 @@ int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i);
int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
void cx88_querycap(struct file *file, struct cx88_core *core,
- struct v4l2_capability *cap);
+ struct v4l2_capability *cap);
+
+#endif
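
For context on the printk-to-pr_*() conversion in this series: defining pr_fmt() before the first include, as cx88.h now does, makes every pr_info()/pr_warn()/pr_err() call prefix its output with the module name, which is why the hand-rolled "%s/0:" core->name prefixes could be dropped. A minimal sketch, assuming nothing beyond the standard printk machinery; the module is hypothetical:

/* Must come before the first include so <linux/printk.h> sees it. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init ex_init(void)
{
	/* Logs "<modname>: hello" without an explicit per-call prefix. */
	pr_info("hello\n");
	return 0;
}
module_init(ex_init);

MODULE_LICENSE("GPL");
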
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 18e3a4deee64..a6c9fe235974 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -824,8 +824,7 @@ static int dvb_input_attach(struct ddb_input *input)
&input->port->dev->pdev->dev,
adapter_nr);
if (ret < 0) {
- printk(KERN_ERR "ddbridge: Could not register adapter."
- "Check if you enabled enough adapters in dvb-core!\n");
+ printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n");
return ret;
}
input->attached = 1;
@@ -1730,8 +1729,7 @@ static __init int module_init_ddbridge(void)
{
int ret;
- printk(KERN_INFO "Digital Devices PCIE bridge driver, "
- "Copyright (C) 2010-11 Digital Devices GmbH\n");
+ printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
ret = ddb_class_create();
if (ret < 0)
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 5dd504741b12..a589aa78d1d9 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -315,8 +315,7 @@ static void dm1105_card_list(struct pci_dev *pci)
"dm1105: Updating to the latest version might help\n"
"dm1105: as well.\n");
}
- printk(KERN_ERR "Here is a list of valid choices for the card=<n> "
- "insmod option:\n");
+ printk(KERN_ERR "Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < ARRAY_SIZE(dm1105_boards); i++)
printk(KERN_ERR "dm1105: card=%d -> %s\n",
i, dm1105_boards[i].name);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8a86b61a896d..374f45f81ab3 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -177,8 +177,8 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
#if 0
ret = snd_ivtv_mixer_create(itvsc);
if (ret) {
- IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d:"
- " proceeding anyway\n", __func__, ret);
+ IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d: proceeding anyway\n",
+ __func__, ret);
}
#endif
@@ -235,8 +235,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
if (s->vdev.v4l2_dev == NULL) {
- IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
- "skipping\n", __func__);
+ IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+ __func__);
return 0;
}
@@ -250,8 +250,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
IVTV_ALSA_ERR("%s: failed to create struct snd_ivtv_card\n",
__func__);
} else {
- IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance "
- "\n", __func__);
+ IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance \n",
+ __func__);
}
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ee48c3e09de4..0a3b80a4bd69 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -885,8 +885,8 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 64 && ivtv_pci_latency) {
- IVTV_INFO("Unreasonably low latency timer, "
- "setting to 64 (was %d)\n", pci_latency);
+ IVTV_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+ pci_latency);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
}
@@ -896,8 +896,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
these problems. */
pci_write_config_dword(pdev, 0x40, 0xffff);
- IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%llx\n",
+ IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev->irq, pci_latency, (u64)itv->base_addr);
@@ -1047,13 +1046,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
- "encoder memory\n");
- IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of "
- "vmalloc address space for this window\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
+ IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1064,14 +1060,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into "
- "CX23415 decoder memory\n");
- IVTV_ERR("Each capture card with a CX23415 needs 8 MB "
- "of vmalloc address space for this window\n");
- IVTV_ERR("Check the output of 'grep Vmalloc "
- "/proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option "
- "to set VmallocTotal to a larger value\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
+ IVTV_ERR("Each capture card with a CX23415 needs 8 MB of vmalloc address space for this window\n");
+ IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_mem;
}
@@ -1086,13 +1078,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
itv->reg_mem =
ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
- IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
- "register space\n");
- IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of "
- "vmalloc address space for this window\n");
+ IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
+ IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
- IVTV_ERR("Use the vmalloc= kernel command line option to set "
- "VmallocTotal to a larger value\n");
+ IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
retval = -ENOMEM;
goto free_io;
}
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index 10cba305dbd2..6b09a9514d64 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -53,7 +53,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/dvb/video.h>
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 5b3095f65dce..ba279fdb3df8 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -376,8 +376,8 @@ int ivtv_firmware_check(struct ivtv *itv, char *where)
/* If something failed & currently idle, try to reload */
if (res && !atomic_read(&itv->capturing) &&
!atomic_read(&itv->decoding)) {
- IVTV_INFO("Detected in %s that firmware had failed - "
- "Reloading\n", where);
+ IVTV_INFO("Detected in %s that firmware had failed - Reloading\n",
+ where);
res = ivtv_firmware_restart(itv);
/*
* Even if restarted ok, still signal a problem had occurred.
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index f7299d3d8244..44936d6d7c39 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -89,8 +89,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
if (y_pages == y_dma.page_count) {
IVTV_DEBUG_WARN
- ("failed to map uv user pages, returned %d "
- "expecting %d\n", uv_pages, uv_dma.page_count);
+ ("failed to map uv user pages, returned %d expecting %d\n",
+ uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
for (i = 0; i < uv_pages; i++)
@@ -101,8 +101,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
}
} else {
IVTV_DEBUG_WARN
- ("failed to map y user pages, returned %d "
- "expecting %d\n", y_pages, y_dma.page_count);
+ ("failed to map y user pages, returned %d expecting %d\n",
+ y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
for (i = 0; i < y_pages; i++)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 8b95eefb610b..612a8402cf4d 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -293,8 +293,7 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
/* Map User DMA */
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
mutex_unlock(&itv->udma.lock);
- IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, "
- "Error with get_user_pages: %d bytes, %d pages returned\n",
+ IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with get_user_pages: %d bytes, %d pages returned\n",
size_in_bytes, itv->udma.page_count);
/* get_user_pages must have failed completely */
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index ba887e8e1b17..24fba633c217 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -37,7 +37,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -60,8 +60,7 @@ MODULE_PARM_DESC(gbuffers, "number of capture buffers, default is 2 (32 max)");
/* size of a grab buffer */
static unsigned int gbufsize = MEYE_MAX_BUFSIZE;
module_param(gbufsize, int, 0444);
-MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400"
- " (will be rounded up to a page multiple)");
+MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400 (will be rounded up to a page multiple)");
/* /dev/videoX registration number */
static int video_nr = -1;
@@ -587,10 +586,7 @@ static void mchip_hic_stop(void)
/* get the next ready frame from the dma engine */
static u32 mchip_get_frame(void)
{
- u32 v;
-
- v = mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
- return v;
+ return mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
}
/* frees the current frame from the dma engine */
@@ -1261,8 +1257,7 @@ static int vidioc_reqbufs(struct file *file, void *fh,
meye.grab_fbuffer = rvmalloc(gbuffers * gbufsize);
if (!meye.grab_fbuffer) {
- printk(KERN_ERR "meye: v4l framebuffer allocation"
- " failed\n");
+ printk(KERN_ERR "meye: v4l framebuffer allocation failed\n");
mutex_unlock(&meye.lock);
return -ENOMEM;
}
@@ -1659,8 +1654,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
- v4l2_err(v4l2_dev, "meye: did you enable the camera in "
- "sonypi using the module options ?\n");
+ v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
goto outsonypienable;
}
@@ -1834,8 +1828,7 @@ static int __init meye_init(void)
if (gbufsize > MEYE_MAX_BUFSIZE)
gbufsize = MEYE_MAX_BUFSIZE;
gbufsize = PAGE_ALIGN(gbufsize);
- printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
- "for capture\n",
+ printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) for capture\n",
gbuffers,
gbufsize / 1024, gbuffers * gbufsize / 1024);
return pci_register_driver(&meye_driver);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index b078ac2a682c..191bd8299dc3 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -1030,15 +1030,4 @@ static struct pci_driver netup_unidvb_pci_driver = {
.resume = NULL,
};
-static int __init netup_unidvb_init(void)
-{
- return pci_register_driver(&netup_unidvb_pci_driver);
-}
-
-static void __exit netup_unidvb_fini(void)
-{
- pci_unregister_driver(&netup_unidvb_pci_driver);
-}
-
-module_init(netup_unidvb_init);
-module_exit(netup_unidvb_fini);
+module_pci_driver(netup_unidvb_pci_driver);
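
The module_pci_driver() macro used above generates the module_init()/module_exit() pair that call pci_register_driver() and pci_unregister_driver(), i.e. exactly the boilerplate this hunk deletes. A minimal sketch with a hypothetical driver:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical driver used only to illustrate the macro. */
static struct pci_driver ex_pci_driver = {
	.name = "ex_pci",
	/* .id_table, .probe and .remove would be filled in by a real driver. */
};

/* Expands to module_init/module_exit calling pci_(un)register_driver(). */
module_pci_driver(ex_pci_driver);

MODULE_LICENSE("GPL");
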
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 655d6854a8d7..65afb71ff79f 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -577,12 +577,12 @@ static int pluto_read_serial(struct pluto *pluto)
for (j = 0; j < 32; j += 8) {
if ((val & 0xff) == 0xff)
goto out;
- printk("%c", val & 0xff);
+ printk(KERN_CONT "%c", val & 0xff);
val >>= 8;
}
}
out:
- printk("\n");
+ printk(KERN_CONT "\n");
pci_iounmap(pdev, cis);
return 0;
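
The KERN_CONT change above matters because the serial number is printed one character at a time; without the continuation level, recent kernels start a new log record for every printk() call. A minimal sketch of the same pattern (hypothetical helper, assuming only the standard printk API):

#include <linux/types.h>
#include <linux/printk.h>

/* Illustrative only: print a short hex dump on a single log line. */
static void ex_dump(const u8 *buf, int len)
{
	int i;

	pr_info("serial:");
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);
	printk(KERN_CONT "\n");
}
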
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index e7e4428109c3..d5ee82aee9e8 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -282,13 +282,12 @@ static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
continue;
if (upacket >> 24 & 1)
- printk_ratelimited(KERN_INFO "earth-pt1: device "
- "buffer overflowing. table[%d] buf[%d]\n",
+ printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
pt1->table_index, pt1->buf_index);
sc = upacket >> 26 & 0x7;
if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
- printk_ratelimited(KERN_INFO "earth-pt1: data loss"
- " in streamID(adapter)[%d]\n", index);
+ printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
+ index);
adap->st_count = sc;
buf = adap->buf;
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index d0e70dc0e16f..249273b2e0f2 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -578,7 +578,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops va1j5jf8007s_ops = {
+static const struct dvb_frontend_ops va1j5jf8007s_ops = {
.delsys = { SYS_ISDBS },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index 0268f20b8097..e0766e69a370 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -427,7 +427,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops va1j5jf8007t_ops = {
+static const struct dvb_frontend_ops va1j5jf8007t_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 7fb649e523f4..77f4d15f322b 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -463,7 +463,7 @@ static int pt3_fetch_thread(void *data)
pt3_proc_dma(adap);
- delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
+ delay = PT3_FETCH_DELAY * NSEC_PER_MSEC;
set_current_state(TASK_UNINTERRUPTIBLE);
freezable_schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dc0e2fc5f68b..8a35ecfb75e3 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -813,8 +813,7 @@ static int snd_card_saa7134_capture_open(struct snd_pcm_substream * substream)
int amux, err;
if (!saa7134) {
- pr_err("BUG: saa7134 can't find device struct."
- " Can't proceed with open\n");
+ pr_err("BUG: saa7134 can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
dev = saa7134->dev;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c480a7e87593..2b60af493de4 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7341,8 +7341,8 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
break;
default:
- pr_warn("%s: warning: "
- "unknown hauppauge model #%d\n", dev->name, tv.model);
+ pr_warn("%s: warning: unknown hauppauge model #%d\n",
+ dev->name, tv.model);
break;
}
@@ -7920,8 +7920,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
msg.addr = 0x0b;
msg.len = 1;
if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
- pr_warn("%s: send wake up byte to pic16C505"
- "(IR chip) failed\n", dev->name);
+ pr_warn("%s: send wake up byte to pic16C505(IR chip) failed\n",
+ dev->name);
} else {
msg.flags = I2C_M_RD;
rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index ffb66a9ae23e..7d6bb5c9343f 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -66,8 +66,7 @@ MODULE_PARM_DESC(latency,"pci latency timer");
int saa7134_no_overlay=-1;
module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
- " [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
bool saa7134_userptr;
module_param(saa7134_userptr, bool, 0644);
@@ -619,25 +618,25 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
print_irqstatus(dev,loop,report,status);
if (report & SAA7134_IRQ_REPORT_PE) {
/* disable all parity error */
- pr_warn("%s/irq: looping -- "
- "clearing PE (parity error!) enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing PE (parity error!) enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
} else if (report & SAA7134_IRQ_REPORT_GPIO16) {
/* disable gpio16 IRQ */
- pr_warn("%s/irq: looping -- "
- "clearing GPIO16 enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing GPIO16 enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
- pr_warn("%s/irq: looping -- "
- "clearing GPIO18 enable bit\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing GPIO18 enable bit\n",
+ dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
- pr_warn("%s/irq: looping -- "
- "clearing all enable bits\n",dev->name);
+ pr_warn("%s/irq: looping -- clearing all enable bits\n",
+ dev->name);
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
}
@@ -1081,18 +1080,14 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
}
#endif
if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) {
- pr_info("%s: quirk: this driver and your "
- "chipset may not work together"
- " in overlay mode.\n",dev->name);
+ pr_info("%s: quirk: this driver and your chipset may not work together in overlay mode.\n",
+ dev->name);
if (!saa7134_no_overlay) {
- pr_info("%s: quirk: overlay "
- "mode will be disabled.\n",
+ pr_info("%s: quirk: overlay mode will be disabled.\n",
dev->name);
saa7134_no_overlay = 1;
} else {
- pr_info("%s: quirk: overlay "
- "mode will be forced. Use this"
- " option at your own risk.\n",
+ pr_info("%s: quirk: overlay mode will be forced. Use this option at your own risk.\n",
dev->name);
}
}
@@ -1106,10 +1101,10 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- pr_info("%s: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
- pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
- dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+ pr_info("%s: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+ dev->pci_lat,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index 59a4b5f7724e..598b8bbfe726 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1449,8 +1449,8 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach, fe0->dvb.frontend,
0x60, &dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Medion Quadro, no tda826x "
- "found !\n", __func__);
+ pr_warn("%s: Medion Quadro, no tda826x found !\n",
+ __func__);
goto detach_frontend;
}
if (dev_id != 0x08) {
@@ -1458,8 +1458,8 @@ static int dvb_init(struct saa7134_dev *dev)
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
- pr_warn("%s: Medion Quadro, no ISL6405 "
- "found !\n", __func__);
+ pr_warn("%s: Medion Quadro, no ISL6405 found !\n",
+ __func__);
goto detach_frontend;
}
if (dev_id == 0x07) {
@@ -1629,8 +1629,8 @@ static int dvb_init(struct saa7134_dev *dev)
struct dvb_frontend *fe;
if (dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
&dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
- pr_warn("%s: MD7134 DVB-S, no SD1878 "
- "found !\n", __func__);
+ pr_warn("%s: MD7134 DVB-S, no SD1878 found !\n",
+ __func__);
goto detach_frontend;
}
/* we need to open the i2c gate (we know it exists) */
@@ -1638,8 +1638,8 @@ static int dvb_init(struct saa7134_dev *dev)
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
- pr_warn("%s: MD7134 DVB-S, no ISL6405 "
- "found !\n", __func__);
+ pr_warn("%s: MD7134 DVB-S, no ISL6405 found !\n",
+ __func__);
goto detach_frontend;
}
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -1670,14 +1670,14 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach,
fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Asus Tiger 3in1, no "
- "tda826x found!\n", __func__);
+ pr_warn("%s: Asus Tiger 3in1, no tda826x found!\n",
+ __func__);
goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
- pr_warn("%s: Asus Tiger 3in1, no lnbp21"
- " found!\n", __func__);
+ pr_warn("%s: Asus Tiger 3in1, no lnbp21 found!\n",
+ __func__);
goto detach_frontend;
}
}
@@ -1695,14 +1695,14 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach,
fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
- pr_warn("%s: Asus My Cinema PS3-100, no "
- "tda826x found!\n", __func__);
+ pr_warn("%s: Asus My Cinema PS3-100, no tda826x found!\n",
+ __func__);
goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
- pr_warn("%s: Asus My Cinema PS3-100, no lnbp21"
- " found!\n", __func__);
+ pr_warn("%s: Asus My Cinema PS3-100, no lnbp21 found!\n",
+ __func__);
goto detach_frontend;
}
}
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 2dac48fa1386..dca0592c5f47 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index eff52bbbfd66..823b75ed47e1 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -123,8 +123,7 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_flydvb_trio: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_flydvb_trio: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
@@ -150,8 +149,8 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
msleep(10);
continue;
}
- ir_dbg(ir, "send wake up byte to pic16C505 (IR chip)"
- "failed %dx\n", attempt);
+ ir_dbg(ir, "send wake up byte to pic16C505 (IR chip)failed %dx\n",
+ attempt);
return -EIO;
}
if (1 != i2c_master_recv(ir->c, &b, 1)) {
@@ -174,8 +173,7 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol
/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_msi_tvanywhere_plus: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_msi_tvanywhere_plus: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
@@ -223,8 +221,7 @@ static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol,
/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
struct saa7134_dev *dev = ir->c->adapter->algo_data;
if (dev == NULL) {
- ir_dbg(ir, "get_key_kworld_pc150u: "
- "ir->c->adapter->algo_data is NULL!\n");
+ ir_dbg(ir, "get_key_kworld_pc150u: ir->c->adapter->algo_data is NULL!\n");
return -EIO;
}
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index f30758e24f5d..62c34504199d 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -218,8 +218,7 @@ int saa7164_buffer_activate(struct saa7164_buffer *buf, int i)
saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i), buf->pt_dma);
saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
- dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) "
- "buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
+ dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
buf->idx,
(u64)port->bufoffset + (i * sizeof(u32)),
saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a18fe5d47238..e305c02f9dc9 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -427,8 +427,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
- printk(KERN_ERR "%s() Invalid bus state, missing msg "
- "or mangled ring, faulty H/W / bad code?\n", __func__);
+ printk(KERN_ERR "%s() Invalid bus state, missing msg or mangled ring, faulty H/W / bad code?\n",
+ __func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index c2b738227f58..15a98c638c55 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -726,8 +726,8 @@ void saa7164_card_list(struct saa7164_dev *dev)
dev->name, dev->name, dev->name, dev->name);
}
- printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod "
- "option:\n", dev->name);
+ printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod option:\n",
+ dev->name);
for (i = 0; i < saa7164_bcount; i++)
printk(KERN_ERR "%s: card=%d -> %s\n",
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 3285c37b4583..45951b3cc251 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -301,8 +301,8 @@ static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
else
saa7164_cmd_timeout_seqno(dev, seqno);
- dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d "
- "(signalled=%d)\n", __func__, seqno, r,
+ dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d (signalled=%d)\n",
+ __func__, seqno, r,
dev->cmds[seqno].signalled);
} else
ret = SAA_OK;
@@ -353,8 +353,8 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
int ret;
int safety = 0;
- dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, "
- "sel = 0x%x)\n", __func__, saa7164_unitid_name(dev, id), id,
+ dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, sel = 0x%x)\n",
+ __func__, saa7164_unitid_name(dev, id), id,
command, controlselector);
if ((size == 0) || (buf == NULL)) {
@@ -452,9 +452,7 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
if (presponse_t->seqno != pcommand_t->seqno) {
dprintk(DBGLVL_CMD,
- "wrong event: seqno = %d, "
- "expected seqno = %d, "
- "will dequeue regardless\n",
+ "wrong event: seqno = %d, expected seqno = %d, will dequeue regardless\n",
presponse_t->seqno, pcommand_t->seqno);
ret = saa7164_cmd_dequeue(dev);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8bbd092fbe1d..03a1511a92be 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -710,9 +710,7 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
} else {
/* Find the function */
dprintk(DBGLVL_IRQ,
- "%s() unhandled interrupt "
- "reg 0x%x bit 0x%x "
- "intid = 0x%x\n",
+ "%s() unhandled interrupt reg 0x%x bit 0x%x intid = 0x%x\n",
__func__, i, bit, intid);
}
}
@@ -767,13 +765,11 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr)
{
int i;
- dprintk(1, "--------------------> "
- "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ dprintk(1, "--------------------> 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
for (i = 0; i < 0x100; i += 16)
- dprintk(1, "region0[0x%08x] = "
- "%02x %02x %02x %02x %02x %02x %02x %02x"
- " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ dprintk(1, "region0[0x%08x] = %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ i,
(u8)saa7164_readb(addr + i + 0),
(u8)saa7164_readb(addr + i + 1),
(u8)saa7164_readb(addr + i + 2),
@@ -825,8 +821,7 @@ static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p intfdesc "
- "sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
+ dprintk(1, "@0x%p intfdesc sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
&dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr));
dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength);
@@ -1011,8 +1006,7 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
saa7164_port_init(dev, SAA7164_PORT_VBI2);
if (get_resources(dev) < 0) {
- printk(KERN_ERR "CORE %s No more PCIe resources for "
- "subsystem: %04x:%04x\n",
+ printk(KERN_ERR "CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
@@ -1204,8 +1198,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
err = pci_enable_msi(pci_dev);
if (err) {
- printk(KERN_ERR "%s() Failed to enable MSI interrupt."
- " Falling back to a shared IRQ\n", __func__);
+ printk(KERN_ERR "%s() Failed to enable MSI interrupt. Falling back to a shared IRQ\n",
+ __func__);
return false;
}
@@ -1215,8 +1209,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
if (err) {
/* fall back to legacy interrupt */
- printk(KERN_ERR "%s() Failed to get an MSI interrupt."
- " Falling back to a shared IRQ\n", __func__);
+ printk(KERN_ERR "%s() Failed to get an MSI interrupt. Falling back to a shared IRQ\n",
+ __func__);
pci_disable_msi(pci_dev);
return false;
}
@@ -1256,8 +1250,8 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
- printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
- "latency: %d, mmio: 0x%llx\n", dev->name,
+ printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,
(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -1307,8 +1301,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
err = saa7164_downloadfirmware(dev);
if (err < 0) {
printk(KERN_ERR
- "Failed to boot firmware, no features "
- "registered\n");
+ "Failed to boot firmware, no features registered\n");
goto fail_fw;
}
@@ -1327,8 +1320,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
*/
version = 0;
if (saa7164_api_get_fw_version(dev, &version) == SAA_OK)
- dprintk(1, "Bus is operating correctly using "
- "version %d.%d.%d.%d (0x%x)\n",
+ dprintk(1, "Bus is operating correctly using version %d.%d.%d.%d (0x%x)\n",
(version & 0x0000fc00) >> 10,
(version & 0x000003e0) >> 5,
(version & 0x0000001f),
@@ -1356,45 +1348,43 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
/* Begin to create the video sub-systems and register funcs */
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) {
if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) {
- printk(KERN_ERR "%s() Failed to register "
- "dvb adapters on porta\n",
+ printk(KERN_ERR "%s() Failed to register dvb adapters on porta\n",
__func__);
}
}
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) {
if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "dvb adapters on portb\n",
+ printk(KERN_ERR"%s() Failed to register dvb adapters on portb\n",
__func__);
}
}
if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) {
if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "mpeg encoder\n", __func__);
+ printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) {
if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "mpeg encoder\n", __func__);
+ printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) {
if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "vbi device\n", __func__);
+ printk(KERN_ERR"%s() Failed to register vbi device\n",
+ __func__);
}
}
if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) {
if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) {
- printk(KERN_ERR"%s() Failed to register "
- "vbi device\n", __func__);
+ printk(KERN_ERR"%s() Failed to register vbi device\n",
+ __func__);
}
}
saa7164_api_set_debug(dev, fw_debug);
@@ -1404,15 +1394,15 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
"saa7164 debug");
if (IS_ERR(dev->kthread)) {
dev->kthread = NULL;
- printk(KERN_ERR "%s() Failed to create "
- "debug kernel thread\n", __func__);
+ printk(KERN_ERR "%s() Failed to create debug kernel thread\n",
+ __func__);
}
}
} /* != BOARD_UNKNOWN */
else
- printk(KERN_ERR "%s() Unsupported board detected, "
- "registering without firmware\n", __func__);
+ printk(KERN_ERR "%s() Unsupported board detected, registering without firmware\n",
+ __func__);
dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug);
dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs);
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index e9a783b71b45..cd3eeda5250b 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -244,8 +244,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() acquire/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
goto out;
@@ -261,8 +261,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -279,8 +279,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -357,8 +357,7 @@ static int dvb_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
result = -ENOMEM;
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), NO PCI configuration\n",
DRIVER_NAME, result);
goto fail_adapter;
}
@@ -386,8 +385,7 @@ static int dvb_register(struct saa7164_port *port)
if (!buf) {
result = -ENOMEM;
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d), unable to allocate buffers\n",
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), unable to allocate buffers\n",
DRIVER_NAME, result);
goto fail_adapter;
}
@@ -401,8 +399,8 @@ static int dvb_register(struct saa7164_port *port)
result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE,
&dev->pci->dev, adapter_nr);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_adapter;
}
dvb->adapter.priv = port;
@@ -410,8 +408,8 @@ static int dvb_register(struct saa7164_port *port)
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_frontend failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: dvb_register_frontend failed (errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_frontend;
}
@@ -444,16 +442,16 @@ static int dvb_register(struct saa7164_port *port)
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result);
+ printk(KERN_ERR "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ DRIVER_NAME, result);
goto fail_fe_mem;
}
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 32a353d162e7..68124ce7ebc3 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -157,8 +157,7 @@ static int saa7164_encoder_buffers_alloc(struct saa7164_port *port)
params->pitch);
if (!buf) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), unable to allocate buffer\n",
+ printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
__func__, result);
result = -ENOMEM;
goto failed;
@@ -681,8 +680,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() acquire/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
goto out;
@@ -698,8 +697,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -716,8 +715,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -1026,8 +1025,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
goto failed;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e0782c7b6..8568adfd7ece 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -421,8 +421,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
ret = request_firmware(&fw, fwname, &dev->pci->dev);
if (ret) {
- printk(KERN_ERR "%s() Upload failed. "
- "(file not found?)\n", __func__);
+ printk(KERN_ERR "%s() Upload failed. (file not found?)\n",
+ __func__);
return -ENOMEM;
}
@@ -478,15 +478,13 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
0x03) && (saa7164_readl(SAA_DATAREADY_FLAG_ACK)
== 0x00) && (version == 0x00)) {
- dprintk(DBGLVL_FW, "BootLoader version in "
- "rom %d.%d.%d.%d\n",
+ dprintk(DBGLVL_FW, "BootLoader version in rom %d.%d.%d.%d\n",
(bootloaderversion & 0x0000fc00) >> 10,
(bootloaderversion & 0x000003e0) >> 5,
(bootloaderversion & 0x0000001f),
(bootloaderversion & 0xffff0000) >> 16
);
- dprintk(DBGLVL_FW, "BootLoader version "
- "in file %d.%d.%d.%d\n",
+ dprintk(DBGLVL_FW, "BootLoader version in file %d.%d.%d.%d\n",
(boothdr->version & 0x0000fc00) >> 10,
(boothdr->version & 0x000003e0) >> 5,
(boothdr->version & 0x0000001f),
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ee54491459a6..e5dcb81029d3 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -110,8 +110,7 @@ static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
params->pitch);
if (!buf) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), unable to allocate buffer\n",
+ printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
__func__, result);
result = -ENOMEM;
goto failed;
@@ -384,8 +383,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_stop_port(port);
if (result != SAA_OK) {
- printk(KERN_ERR "%s() pause/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -403,8 +402,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
result = saa7164_vbi_acquire_port(port);
result = saa7164_vbi_stop_port(port);
if (result != SAA_OK) {
- printk(KERN_ERR "%s() run/forced stop transition "
- "failed, res = 0x%x\n", __func__, result);
+ printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+ __func__, result);
}
ret = -EIO;
@@ -728,8 +727,7 @@ int saa7164_vbi_register(struct saa7164_port *port)
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
- printk(KERN_ERR "%s() failed "
- "(errno = %d), NO PCI configuration\n",
+ printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
goto failed;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index b4be47969b6b..896bec6627aa 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -702,8 +702,8 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
SOLO6X10_NAME, solo_dev->vfd->num);
- dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
- "%d inputs (%d extended)\n", solo_dev->vfd->num,
+ dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with %d inputs (%d extended)\n",
+ solo_dev->vfd->num,
solo_dev->nr_chans, solo_dev->nr_ext);
return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 5bd498735a66..3f8da5e8c430 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -284,7 +284,10 @@ static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
u32 data)
{
+ u16 val;
+
writel(data, solo_dev->reg_base + reg);
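+	/* Read back via PCI config space to flush the posted MMIO write (assumed workaround for delayed register writes). */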
+ pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
}
static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 49f71b1eaf14..3cf617737f7c 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -3,7 +3,7 @@
# and the AV7110 DVB device driver
#
-dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o
+dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o dvb_filter.o
ifdef CONFIG_DVB_AV7110_IR
dvb-ttpci-objs += av7110_ir.o
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 382caf200ba1..6e63949d6ad0 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -100,8 +100,7 @@ MODULE_PARM_DESC(adac,"audio DAC type: 0 TI, 1 CRYSTAL, 2 MSP (use if autodetect
module_param(hw_sections, int, 0444);
MODULE_PARM_DESC(hw_sections, "0 use software section filter, 1 use hardware");
module_param(rgb_on, int, 0444);
-MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control"
- " signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
+MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
module_param(volume, int, 0444);
MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
module_param(budgetpatch, int, 0444);
@@ -444,21 +443,6 @@ static void debiirq(unsigned long cookie)
case DATA_COMMON_INTERFACE:
CI_handle(av7110, (u8 *)av7110->debi_virt, av7110->debilen);
-#if 0
- {
- int i;
-
- printk("av7110%d: ", av7110->num);
- printk("%02x ", *(u8 *)av7110->debi_virt);
- printk("%02x ", *(1+(u8 *)av7110->debi_virt));
- for (i = 2; i < av7110->debilen; i++)
- printk("%02x ", (*(i+(unsigned char *)av7110->debi_virt)));
- for (i = 2; i < av7110->debilen; i++)
- printk("%c", chtrans(*(i+(unsigned char *)av7110->debi_virt)));
-
- printk("\n");
- }
-#endif
xfer = RX_BUFF;
break;
@@ -833,8 +817,7 @@ static int StartHWFilter(struct dvb_demux_filter *dvbdmxfilter)
ret = av7110_fw_request(av7110, buf, 20, &handle, 1);
if (ret != 0 || handle >= 32) {
- printk("dvb-ttpci: %s error buf %04x %04x %04x %04x "
- "ret %d handle %04x\n",
+ printk(KERN_ERR "dvb-ttpci: %s error buf %04x %04x %04x %04x ret %d handle %04x\n",
__func__, buf[0], buf[1], buf[2], buf[3],
ret, handle);
dvbdmxfilter->hw_handle = 0xffff;
@@ -876,8 +859,7 @@ static int StopHWFilter(struct dvb_demux_filter *dvbdmxfilter)
buf[2] = handle;
ret = av7110_fw_request(av7110, buf, 3, answ, 2);
if (ret != 0 || answ[1] != handle) {
- printk("dvb-ttpci: %s error cmd %04x %04x %04x ret %x "
- "resp %04x %04x pid %d\n",
+ printk(KERN_ERR "dvb-ttpci: %s error cmd %04x %04x %04x ret %x resp %04x %04x pid %d\n",
__func__, buf[0], buf[1], buf[2], ret,
answ[0], answ[1], dvbdmxfilter->feed->pid);
if (!ret)
@@ -1532,15 +1514,12 @@ static int get_firmware(struct av7110* av7110)
ret = request_firmware(&fw, "dvb-ttpci-01.fw", &av7110->dev->pci->dev);
if (ret) {
if (ret == -ENOENT) {
- printk(KERN_ERR "dvb-ttpci: could not load firmware,"
- " file not found: dvb-ttpci-01.fw\n");
- printk(KERN_ERR "dvb-ttpci: usually this should be in "
- "/usr/lib/hotplug/firmware or /lib/firmware\n");
- printk(KERN_ERR "dvb-ttpci: and can be downloaded from"
- " https://linuxtv.org/download/dvb/firmware/\n");
+ printk(KERN_ERR "dvb-ttpci: could not load firmware, file not found: dvb-ttpci-01.fw\n");
+ printk(KERN_ERR "dvb-ttpci: usually this should be in /usr/lib/hotplug/firmware or /lib/firmware\n");
+ printk(KERN_ERR "dvb-ttpci: and can be downloaded from https://linuxtv.org/download/dvb/firmware/\n");
} else
- printk(KERN_ERR "dvb-ttpci: cannot request firmware"
- " (error %i)\n", ret);
+ printk(KERN_ERR "dvb-ttpci: cannot request firmware (error %i)\n",
+ ret);
return -EINVAL;
}
@@ -2700,8 +2679,9 @@ static int av7110_attach(struct saa7146_dev* dev,
goto err_stop_arm_9;
if (FW_VERSION(av7110->arm_app)<0x2501)
- printk ("dvb-ttpci: Warning, firmware version 0x%04x is too old. "
- "System might be unstable!\n", FW_VERSION(av7110->arm_app));
+ printk(KERN_WARNING
+ "dvb-ttpci: Warning, firmware version 0x%04x is too old. System might be unstable!\n",
+ FW_VERSION(av7110->arm_app));
thread = kthread_run(arm_thread, (void *) av7110, "arm_mon");
if (IS_ERR(thread)) {
@@ -2930,9 +2910,7 @@ static struct saa7146_extension av7110_extension_driver = {
static int __init av7110_init(void)
{
- int retval;
- retval = saa7146_register_extension(&av7110_extension_driver);
- return retval;
+ return saa7146_register_extension(&av7110_extension_driver);
}
@@ -2944,7 +2922,6 @@ static void __exit av7110_exit(void)
module_init(av7110_init);
module_exit(av7110_exit);
-MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by "
- "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by Siemens, Technotrend, Hauppauge");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 3707ccd02732..824c1e262fbb 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -40,8 +40,11 @@
extern int av7110_debug;
-#define dprintk(level,args...) \
- do { if ((av7110_debug & level)) { printk("dvb-ttpci: %s(): ", __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (level & av7110_debug) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt, \
+ __func__, ##arg); \
+} while (0)
#define MAXFILT 32
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 0583d56ef5ef..520414cbe087 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -235,8 +235,7 @@ int av7110_bootarm(struct av7110 *av7110)
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
- printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: "
- "%08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
+ printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
ret, 0x10325476);
return -1;
}
@@ -262,8 +261,7 @@ int av7110_bootarm(struct av7110 *av7110)
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "saa7146_wait_for_debi_done() timed out\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
@@ -271,8 +269,7 @@ int av7110_bootarm(struct av7110 *av7110)
dprintk(1, "load dram code\n");
if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "load_dram() failed\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): load_dram() failed\n");
return -1;
}
@@ -283,8 +280,7 @@ int av7110_bootarm(struct av7110 *av7110)
mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
- printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
- "saa7146_wait_for_debi_done() timed out after loading DRAM\n");
+ printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out after loading DRAM\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 6f0d0161970e..896c66d4b3ae 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1636,5 +1636,4 @@ module_exit(budget_av_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 7b27af4d9658..20ad93bf0f54 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -1586,6 +1586,4 @@ module_exit(budget_ci_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hunold, Jack Thomasson, Andrew de Quincey, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB cards w/ CI-module produced by "
- "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards w/ CI-module produced by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 591dbdfa2a13..f152eda0123a 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -679,5 +679,4 @@ module_exit(budget_patch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Emard, Roberto Deza, Holger Waechtler, Michael Hunold, others");
-MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 "
- "based so-called Budget Patch cards");
+MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 based so-called Budget Patch cards");
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index fb8ede5a1531..3091b480ce22 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -897,5 +897,4 @@ module_exit(budget_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
- "budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index 655eef5236ca..d5ae4438153e 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -21,8 +21,12 @@ extern int budget_debug;
#undef dprintk
#endif
-#define dprintk(level,args...) \
- do { if ((budget_debug & level)) { printk("%s: %s(): ", KBUILD_MODNAME, __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do { \
+ if (level & budget_debug) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt, \
+ __func__, ##arg); \
+} while (0)
+
struct budget_info {
char *name;
diff --git a/drivers/media/pci/ttpci/dvb_filter.c b/drivers/media/pci/ttpci/dvb_filter.c
new file mode 100644
index 000000000000..b67127b67d4e
--- /dev/null
+++ b/drivers/media/pci/ttpci/dvb_filter.c
@@ -0,0 +1,114 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "dvb_filter.h"
+
+static u32 freq[4] = {480, 441, 320, 0};
+
+static unsigned int ac3_bitrates[32] =
+ {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0};
+
+static u32 ac3_frames[3][32] =
+ {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
+ 1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
+ {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
+ 1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
+ {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
+ 1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
+
+int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
+{
+ u8 *headr;
+ int found = 0;
+ int c = 0;
+ u8 frame = 0;
+ int fr = 0;
+
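+	/* Scan the buffer for the AC3 sync word 0x0B 0x77. */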
+ while ( !found && c < count){
+ u8 *b = mbuf+c;
+
+ if ( b[0] == 0x0b && b[1] == 0x77 )
+ found = 1;
+ else {
+ c++;
+ }
+ }
+
+ if (!found) return -1;
+ if (pr)
+ printk(KERN_DEBUG "Audiostream: AC3");
+
+ ai->off = c;
+ if (c+5 >= count) return -1;
+
+ ai->layer = 0; // 0 for AC3
+ headr = mbuf+c+2;
+
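+	/* headr[2] holds fscod (top two bits) and frmsizecod (low six bits) of the AC3 syncinfo. */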
+ frame = (headr[2]&0x3f);
+ ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
+
+ if (pr)
+ printk(KERN_CONT " BRate: %d kb/s", (int) ai->bit_rate/1000);
+
+ ai->frequency = (headr[2] & 0xc0 ) >> 6;
+ fr = (headr[2] & 0xc0 ) >> 6;
+ ai->frequency = freq[fr]*100;
+ if (pr)
+ printk(KERN_CONT " Freq: %d Hz\n", (int) ai->frequency);
+
+ ai->framesize = ac3_frames[fr][frame >> 1];
+ if ((frame & 1) && (fr == 1)) ai->framesize++;
+ ai->framesize = ai->framesize << 1;
+ if (pr)
+ printk(KERN_DEBUG " Framesize %d\n", (int) ai->framesize);
+
+ return 0;
+}
+
+void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+ unsigned char *buf=p2ts->buf;
+
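+	/* Pre-build the constant part of the TS header: sync byte 0x47 and the PID. */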
+ buf[0]=0x47;
+ buf[1]=(pid>>8);
+ buf[2]=pid&0xff;
+ p2ts->cc=0;
+ p2ts->cb=cb;
+ p2ts->priv=priv;
+}
+
+int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
+ int len, int payload_start)
+{
+ unsigned char *buf=p2ts->buf;
+ int ret=0, rest;
+
+ //len=6+((pes[4]<<8)|pes[5]);
+
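+	/* Chop the PES data into 188-byte TS packets: a 4-byte header followed by 184 bytes of payload. */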
+ if (payload_start)
+ buf[1]|=0x40;
+ else
+ buf[1]&=~0x40;
+ while (len>=184) {
+ buf[3]=0x10|((p2ts->cc++)&0x0f);
+ memcpy(buf+4, pes, 184);
+ if ((ret=p2ts->cb(p2ts->priv, buf)))
+ return ret;
+ len-=184; pes+=184;
+ buf[1]&=~0x40;
+ }
+ if (!len)
+ return 0;
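+	/* Short final packet: insert an adaptation field of stuffing bytes so the payload still fills the 188-byte packet. */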
+ buf[3]=0x30|((p2ts->cc++)&0x0f);
+ rest=183-len;
+ if (rest) {
+ buf[5]=0x00;
+ if (rest-1)
+ memset(buf+6, 0xff, rest-1);
+ }
+ buf[4]=rest;
+ memcpy(buf+5+rest, pes, len);
+ return p2ts->cb(p2ts->priv, buf);
+}
diff --git a/drivers/media/dvb-core/dvb_filter.h b/drivers/media/pci/ttpci/dvb_filter.h
index 375e3be184b1..375e3be184b1 100644
--- a/drivers/media/dvb-core/dvb_filter.h
+++ b/drivers/media/pci/ttpci/dvb_filter.h
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/pci/ttpci/ttpci-eeprom.c
index 079ee098b7e3..9534f29c1ffd 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/pci/ttpci/ttpci-eeprom.c
@@ -171,5 +171,4 @@ EXPORT_SYMBOL(ttpci_eeprom_parse_mac);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
-MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards "
- "made by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards made by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
index 92a1b077ef8a..30ac14210e91 100644
--- a/drivers/media/pci/tw5864/tw5864-reg.h
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -1879,6 +1879,14 @@
#define TW5864_INDIR_IN_PIC_HEIGHT(channel) (0x201 + 4 * channel)
#define TW5864_INDIR_OUT_PIC_WIDTH(channel) (0x202 + 4 * channel)
#define TW5864_INDIR_OUT_PIC_HEIGHT(channel) (0x203 + 4 * channel)
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_CROP_ETC 0x260
+/* Define controls in register TW5864_INDIR_CROP_ETC */
+/* Enable cropping from 720 to 704 */
+#define TW5864_INDIR_CROP_ETC_CROP_EN 0x4
+
/*
* Interrupt status register from the front-end. Write "1" to each bit to clear
* the interrupt
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 652a059b2e0a..9421216bb942 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -330,6 +330,15 @@ static int tw5864_enable_input(struct tw5864_input *input)
tw_indir_writeb(TW5864_INDIR_OUT_PIC_WIDTH(nr), input->width / 4);
tw_indir_writeb(TW5864_INDIR_OUT_PIC_HEIGHT(nr), input->height / 4);
+ /*
+ * Crop width from 720 to 704.
+	 * The register settings above still have to be programmed with the full 720-pixel width.
+ */
+ input->width = 704;
+ tw_indir_writeb(TW5864_INDIR_CROP_ETC,
+ tw_indir_readb(TW5864_INDIR_CROP_ETC) |
+ TW5864_INDIR_CROP_ETC_CROP_EN);
+
tw_writel(TW5864_DSP_PIC_MAX_MB,
((input->width / 16) << 8) | (input->height / 16));
@@ -532,7 +541,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
{
struct tw5864_input *input = video_drvdata(file);
- f->fmt.pix.width = 720;
+ f->fmt.pix.width = 704;
switch (input->std) {
default:
WARN_ON_ONCE(1);
@@ -738,7 +747,7 @@ static int tw5864_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = 720;
+ fsize->discrete.width = 704;
fsize->discrete.height = input->std == STD_NTSC ? 480 : 576;
return 0;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index a45e02367321..58c4dd75bfa1 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -279,9 +279,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
height /= 2; /* we must set for 1-frame */
pr_debug("%s: width=%d, height=%d, both=%d\n"
- " tvnorm h_delay=%d, h_start=%d, h_stop=%d, "
- "v_delay=%d, v_start=%d, v_stop=%d\n" , __func__,
- width, height, V4L2_FIELD_HAS_BOTH(field),
+ " tvnorm h_delay=%d, h_start=%d, h_stop=%d, v_delay=%d, v_start=%d, v_stop=%d\n",
+ __func__, width, height, V4L2_FIELD_HAS_BOTH(field),
norm->h_delay, norm->h_start, norm->h_stop,
norm->v_delay, norm->video_v_start,
norm->video_v_stop);
@@ -309,16 +308,15 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
V4L2_FIELD_HAS_TOP(field) ? "T" : "",
V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
v4l2_norm_to_name(dev->tvnorm->id));
- pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; "
- "vactive=%d, vdelay=%d, vscale=%d\n", __func__,
+ pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; vactive=%d, vdelay=%d, vscale=%d\n",
+ __func__,
hactive, hdelay, hscale, vactive, vdelay, vscale);
comb = ((vdelay & 0x300) >> 2) |
((vactive & 0x300) >> 4) |
((hdelay & 0x300) >> 6) |
((hactive & 0x300) >> 8);
- pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, "
- "VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
+ pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
__func__, comb, vdelay, vactive, hdelay, hactive);
tw_writeb(TW68_CROP_HI, comb);
tw_writeb(TW68_VDELAY_LO, vdelay & 0xff);
@@ -327,8 +325,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
tw_writeb(TW68_HACTIVE_LO, hactive & 0xff);
comb = ((vscale & 0xf00) >> 4) | ((hscale & 0xf00) >> 8);
- pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, "
- "HSCALE_LO=%02x\n", __func__, comb, vscale, hscale);
+ pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, HSCALE_LO=%02x\n",
+ __func__, comb, vscale, hscale);
tw_writeb(TW68_SCALE_HI, comb);
tw_writeb(TW68_VSCALE_LO, vscale);
tw_writeb(TW68_HSCALE_LO, hscale);
diff --git a/drivers/media/pci/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c
index 13a3c07cd259..3c3cbce0f9cc 100644
--- a/drivers/media/pci/zoran/videocodec.c
+++ b/drivers/media/pci/zoran/videocodec.c
@@ -40,7 +40,7 @@
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#endif
#include "videocodec.h"
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 4d47ddac97dc..35b552c178da 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -173,12 +173,8 @@ dump_guests (struct zoran *zr)
guest[i] = post_office_read(zr, i, 0);
}
- printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
-
- for (i = 1; i < 8; i++) {
- printk(" 0x%02x", guest[i]);
- }
- printk("\n");
+ printk(KERN_INFO "%s: Guests: %*ph\n",
+ ZR_DEVNAME(zr), 8, guest);
}
}
@@ -216,12 +212,9 @@ detect_guest_activity (struct zoran *zr)
if (j >= 8)
break;
}
- printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
- for (i = 1; i < 8; i++) {
- printk(" 0x%02x", guest0[i]);
- }
- printk("\n");
+ printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0);
+
if (j == 0) {
printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr));
return;
@@ -822,39 +815,39 @@ print_interrupts (struct zoran *zr)
printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr));
if ((res = zr->field_counter) < -1 || res > 1) {
- printk(" FD:%d", res);
+ printk(KERN_CONT " FD:%d", res);
}
if ((res = zr->intr_counter_GIRQ1) != 0) {
- printk(" GIRQ1:%d", res);
+ printk(KERN_CONT " GIRQ1:%d", res);
noerr++;
}
if ((res = zr->intr_counter_GIRQ0) != 0) {
- printk(" GIRQ0:%d", res);
+ printk(KERN_CONT " GIRQ0:%d", res);
noerr++;
}
if ((res = zr->intr_counter_CodRepIRQ) != 0) {
- printk(" CodRepIRQ:%d", res);
+ printk(KERN_CONT " CodRepIRQ:%d", res);
noerr++;
}
if ((res = zr->intr_counter_JPEGRepIRQ) != 0) {
- printk(" JPEGRepIRQ:%d", res);
+ printk(KERN_CONT " JPEGRepIRQ:%d", res);
noerr++;
}
if (zr->JPEG_max_missed) {
- printk(" JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
+ printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
zr->JPEG_min_missed);
}
if (zr->END_event_missed) {
- printk(" ENDs missed: %d", zr->END_event_missed);
+ printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed);
}
//if (zr->jpg_queued_num) {
- printk(" queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
+ printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head);
//}
if (!noerr) {
- printk(": no interrupts detected.");
+ printk(KERN_CONT ": no interrupts detected.");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
void
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d6b631add216..94b9b616df98 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -66,7 +66,7 @@
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
@@ -1488,7 +1488,7 @@ zoran_set_input (struct zoran *zr,
if (input < 0 || input >= zr->card.inputs) {
dprintk(1,
KERN_ERR
- "%s: %s - unnsupported input %d\n",
+ "%s: %s - unsupported input %d\n",
ZR_DEVNAME(zr), __func__, input);
return -EINVAL;
}
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ce4a96fccc43..d944421e392d 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,7 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_PXA27x
tristate "PXA27x Quick Capture Interface driver"
- depends on VIDEO_DEV && HAS_DMA
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on PXA27x || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select SG_SPLIT
@@ -175,6 +175,23 @@ config VIDEO_MEDIATEK_VPU
To compile this driver as a module, choose M here: the
module will be called mtk-vpu.
+config VIDEO_MEDIATEK_MDP
+ tristate "Mediatek MDP driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ default n
+ ---help---
+	  It is a V4L2 driver present in Mediatek MT8173 SoCs.
+	  The driver supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp.
+
config VIDEO_MEDIATEK_VCODEC
tristate "Mediatek Video Codec driver"
depends on MTK_IOMMU || COMPILE_TEST
@@ -249,7 +266,7 @@ config VIDEO_MX2_EMMAPRP
config VIDEO_SAMSUNG_EXYNOS_GSC
tristate "Samsung Exynos G-Scaler driver"
depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_EXYNOS5 || COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
@@ -290,6 +307,20 @@ config VIDEO_SH_VEU
Support for the Video Engine Unit (VEU) on SuperH and
SH-Mobile SoCs.
+config VIDEO_RENESAS_FDP1
+ tristate "Renesas Fine Display Processor"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on (!ARCH_RENESAS && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for the Renesas Fine Display Processor
+	  providing colour space conversion and de-interlacing features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_fdp1.
+
config VIDEO_RENESAS_JPU
tristate "Renesas JPEG Processing Unit"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
@@ -334,6 +365,9 @@ config VIDEO_TI_VPE
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
+ select VIDEO_TI_VPDMA
+ select VIDEO_TI_SC
+ select VIDEO_TI_CSC
default n
---help---
Support for the TI VPE(Video Processing Engine) block
@@ -347,6 +381,17 @@ config VIDEO_TI_VPE_DEBUG
endif # V4L_MEM2MEM_DRIVERS
+# TI VIDEO PORT Helper Modules
+# These will be selected by VPE and VIP
+config VIDEO_TI_VPDMA
+ tristate
+
+config VIDEO_TI_SC
+ tristate
+
+config VIDEO_TI_CSC
+ tristate
+
menuconfig V4L_TEST_DRIVERS
bool "Media test drivers"
depends on MEDIA_CAMERA_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 40b18d12726e..5b3cb271d2b8 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
@@ -66,3 +67,5 @@ ccflags-y += -I$(srctree)/drivers/media/i2c
obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
diff --git a/drivers/media/platform/arv.c b/drivers/media/platform/arv.c
index 03c5098499c4..8fe59bf6cd3f 100644
--- a/drivers/media/platform/arv.c
+++ b/drivers/media/platform/arv.c
@@ -34,7 +34,7 @@
#include <media/v4l2-fh.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/m32r.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index ccfe13b7d3f8..fa68fe912c95 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -617,7 +617,13 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
unsigned long flags;
spin_lock_irqsave(&isc->dma_queue_lock, flags);
- list_add_tail(&buf->list, &isc->dma_queue);
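+	/* If streaming is active and the DMA queue ran dry, start DMA on this buffer right away instead of queueing it. */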
+ if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
+ vb2_is_streaming(vb->vb2_queue)) {
+ isc->cur_frm = buf;
+ isc_start_dma(isc->regmap, isc->cur_frm,
+ isc->current_fmt->reg_dctrl_dview);
+ } else
+ list_add_tail(&buf->list, &isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
@@ -1418,6 +1424,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
if (list_empty(&isc->subdev_entities)) {
dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
goto unregister_v4l2_device;
}
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 8eb03397d736..2e6edc09b58f 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -169,7 +169,7 @@ static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
if (!num_formats)
return -ENXIO;
- sf = kzalloc(num_formats * sizeof(*sf), GFP_KERNEL);
+ sf = kcalloc(num_formats, sizeof(*sf), GFP_KERNEL);
if (!sf)
return -ENOMEM;
@@ -802,10 +802,8 @@ static int bcap_probe(struct platform_device *pdev)
}
bcap_dev = kzalloc(sizeof(*bcap_dev), GFP_KERNEL);
- if (!bcap_dev) {
- v4l2_err(pdev->dev.driver, "Unable to alloc bcap_dev\n");
+ if (!bcap_dev)
return -ENOMEM;
- }
bcap_dev->cfg = config;
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index cff63e511e6d..b8f3d9fa66e9 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
if (params->dlen > 24 || params->dlen <= 0)
return -EINVAL;
pctrl = devm_pinctrl_get(ppi->dev);
+ if (IS_ERR(pctrl))
+ return PTR_ERR(pctrl);
pstate = pinctrl_lookup_state(pctrl,
pin_state[(params->dlen + 7) / 8 - 1]);
if (pinctrl_select_state(pctrl, pstate))
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index c39718a63e5e..9e6bdafa16f5 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2295,8 +2295,13 @@ static int coda_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return coda_firmware_request(dev);
+ ret = coda_firmware_request(dev);
+ if (ret)
+ goto err_alloc_workqueue;
+ return 0;
+err_alloc_workqueue:
+ destroy_workqueue(dev->workqueue);
err_v4l2_register:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 456773af1f1d..09dfcca7cc50 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <coda.h>
static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index c90b9a4f0c24..65c2973167c6 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -334,8 +334,8 @@ static int ccdc_set_params(void __user *params)
x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc"
- "params, %d\n", x);
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc params, %d\n",
+ x);
return -EFAULT;
}
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 6fba32bec974..c7523a7e0594 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -354,8 +354,8 @@ static int ccdc_set_params(void __user *params)
x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying"
- "ccdc params, %d\n", x);
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc params, %d\n",
+ x);
return -EFAULT;
}
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 9a6c2cc38acb..8c8cbeb7d90f 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -107,7 +107,7 @@ static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
struct v4l2_cropcap *cropcap)
{
- if (NULL == cropcap)
+ if (!cropcap)
return -EINVAL;
cropcap->bounds.left = 0;
cropcap->bounds.top = 0;
@@ -149,7 +149,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
int curr_output = output_index;
int i;
- if (NULL == mode)
+ if (!mode)
return -EINVAL;
for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
@@ -166,7 +166,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
struct vpbe_enc_mode_info *mode_info)
{
- if (NULL == mode_info)
+ if (!mode_info)
return -EINVAL;
*mode_info = vpbe_dev->current_timings;
@@ -227,10 +227,9 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
vpbe_current_encoder_info(vpbe_dev);
struct vpbe_config *cfg = vpbe_dev->cfg;
struct venc_platform_data *venc_device = vpbe_dev->venc_device;
- u32 if_params;
int enc_out_index;
int sd_index;
- int ret = 0;
+ int ret;
if (index >= cfg->num_outputs)
return -EINVAL;
@@ -254,20 +253,19 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
sd_index = vpbe_find_encoder_sd_index(cfg, index);
if (sd_index < 0) {
ret = -EINVAL;
- goto out;
+ goto unlock;
}
- if_params = cfg->outputs[index].if_params;
- venc_device->setup_if_config(if_params);
+ ret = venc_device->setup_if_config(cfg->outputs[index].if_params);
if (ret)
- goto out;
+ goto unlock;
}
/* Set output at the encoder */
ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
s_routing, 0, enc_out_index, 0);
if (ret)
- goto out;
+ goto unlock;
/*
	 * It is assumed that venc or external encoder will set a default
@@ -289,7 +287,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
vpbe_dev->current_sd_index = sd_index;
vpbe_dev->current_out_index = index;
}
-out:
+unlock:
mutex_unlock(&vpbe_dev->lock);
return ret;
}
@@ -297,19 +295,19 @@ out:
static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int ret = 0;
int i;
for (i = 0; i < cfg->num_outputs; i++) {
if (!strcmp(def_output,
cfg->outputs[i].output.name)) {
- ret = vpbe_set_output(vpbe_dev, i);
+ int ret = vpbe_set_output(vpbe_dev, i);
+
if (!ret)
vpbe_dev->current_out_index = i;
return ret;
}
}
- return ret;
+ return 0;
}
/**
@@ -356,7 +354,7 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
s_dv_timings, dv_timings);
- if (!ret && (vpbe_dev->amp != NULL)) {
+ if (!ret && vpbe_dev->amp) {
/* Call amplifier subdevice */
ret = v4l2_subdev_call(vpbe_dev->amp, video,
s_dv_timings, dv_timings);
@@ -509,10 +507,9 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
struct v4l2_dv_timings dv_timings;
struct osd_state *osd_device;
int out_index = vpbe_dev->current_out_index;
- int ret = 0;
int i;
- if ((NULL == mode_info) || (NULL == mode_info->name))
+ if (!mode_info || !mode_info->name)
return -EINVAL;
for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
@@ -536,7 +533,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
}
/* Only custom timing should reach here */
- if (preset_mode == NULL)
+ if (!preset_mode)
return -EINVAL;
mutex_lock(&vpbe_dev->lock);
@@ -549,8 +546,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
vpbe_dev->current_timings.upper_margin);
mutex_unlock(&vpbe_dev->lock);
-
- return ret;
+ return 0;
}
static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
@@ -570,9 +566,9 @@ static int platform_device_get(struct device *dev, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct vpbe_device *vpbe_dev = data;
- if (strstr(pdev->name, "vpbe-osd") != NULL)
+ if (strstr(pdev->name, "vpbe-osd"))
vpbe_dev->osd_device = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-venc") != NULL)
+ if (strstr(pdev->name, "vpbe-venc"))
vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
return 0;
@@ -606,7 +602,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
* from the platform device by iteration of platform drivers and
* matching with device name
*/
- if (NULL == vpbe_dev || NULL == dev) {
+ if (!vpbe_dev || !dev) {
printk(KERN_ERR "Null device pointers.\n");
return -ENODEV;
}
@@ -652,7 +648,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
vpbe_dev->cfg->venc.module_name);
/* register venc sub device */
- if (vpbe_dev->venc == NULL) {
+ if (!vpbe_dev->venc) {
v4l2_err(&vpbe_dev->v4l2_dev,
"vpbe unable to init venc sub device\n");
ret = -ENODEV;
@@ -660,8 +656,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
}
/* initialize osd device */
osd_device = vpbe_dev->osd_device;
-
- if (NULL != osd_device->ops.initialize) {
+ if (osd_device->ops.initialize) {
err = osd_device->ops.initialize(osd_device);
if (err) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -676,12 +671,10 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
* store venc sd index.
*/
num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
- vpbe_dev->encoders = kmalloc(
- sizeof(struct v4l2_subdev *)*num_encoders,
- GFP_KERNEL);
- if (NULL == vpbe_dev->encoders) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "unable to allocate memory for encoders sub devices");
+ vpbe_dev->encoders = kmalloc_array(num_encoders,
+ sizeof(*vpbe_dev->encoders),
+ GFP_KERNEL);
+ if (!vpbe_dev->encoders) {
ret = -ENOMEM;
goto fail_dev_unregister;
}
@@ -705,19 +698,17 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
"v4l2 sub device %s registered\n",
enc_info->module_name);
else {
- v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
- " failed to register",
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s failed to register",
enc_info->module_name);
ret = -ENODEV;
goto fail_kfree_encoders;
}
} else
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
- " currently not supported");
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders currently not supported");
}
/* Add amplifier subdevice for dm365 */
if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
- vpbe_dev->cfg->amp != NULL) {
+ vpbe_dev->cfg->amp) {
amp_info = vpbe_dev->cfg->amp;
if (amp_info->is_i2c) {
vpbe_dev->amp = v4l2_i2c_new_subdev_board(
@@ -735,8 +726,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
amp_info->module_name);
} else {
vpbe_dev->amp = NULL;
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers"
- " currently not supported");
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers currently not supported");
}
} else {
vpbe_dev->amp = NULL;
@@ -824,9 +814,8 @@ static int vpbe_probe(struct platform_device *pdev)
{
struct vpbe_device *vpbe_dev;
struct vpbe_config *cfg;
- int ret = -EINVAL;
- if (pdev->dev.platform_data == NULL) {
+ if (!pdev->dev.platform_data) {
v4l2_err(pdev->dev.driver, "No platform data\n");
return -ENODEV;
}
@@ -835,17 +824,14 @@ static int vpbe_probe(struct platform_device *pdev)
if (!cfg->module_name[0] ||
!cfg->osd.module_name[0] ||
!cfg->venc.module_name[0]) {
- v4l2_err(pdev->dev.driver, "vpbe display module names not"
- " defined\n");
- return ret;
+ v4l2_err(pdev->dev.driver, "vpbe display module names not defined\n");
+ return -EINVAL;
}
vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
- if (vpbe_dev == NULL) {
- v4l2_err(pdev->dev.driver, "Unable to allocate memory"
- " for vpbe_device\n");
+ if (!vpbe_dev)
return -ENOMEM;
- }
+
vpbe_dev->cfg = cfg;
vpbe_dev->ops = vpbe_dev_ops;
vpbe_dev->pdev = &pdev->dev;
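
Editor's note: the allocation changes in this file, and the similar ones in the davinci and vpif files below, all converge on one idiom: size the request from the pointer being assigned, use kmalloc_array()/kcalloc() for arrays so the element-count multiplication is overflow-checked, and test the result with a plain !ptr without printing a driver-specific out-of-memory message (the allocator core already warns). A minimal, self-contained sketch of that idiom follows; the function and its use of the type are illustrative, not the driver's own code.

#include <linux/slab.h>

struct v4l2_subdev;	/* only pointers are needed in this sketch */

/* Illustrative only: allocate a table of 'num' sub-device pointers. */
static struct v4l2_subdev **alloc_sd_table(unsigned int num)
{
	/*
	 * kcalloc() checks num * sizeof(*sd) for overflow and zero-fills;
	 * kmalloc_array() is the non-zeroing variant used in the hunks above.
	 */
	struct v4l2_subdev **sd = kcalloc(num, sizeof(*sd), GFP_KERNEL);

	if (!sd)
		return NULL;	/* no extra message: the allocator already warns */

	return sd;
}
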
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6efb2f1631c4..ee1cd79739c8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -229,7 +229,7 @@ int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
BUG_ON(!dev->hw_ops.getfid);
mutex_lock(&ccdc_lock);
- if (NULL == ccdc_cfg) {
+ if (!ccdc_cfg) {
/*
* TODO. Will this ever happen? If so, we need to fix it.
* Probably we need to add the request to a linked list and
@@ -265,7 +265,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
*/
void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
{
- if (NULL == dev) {
+ if (!dev) {
printk(KERN_ERR "invalid ccdc device ptr\n");
return;
}
@@ -281,7 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
mutex_lock(&ccdc_lock);
ccdc_dev = NULL;
mutex_unlock(&ccdc_lock);
- return;
}
EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
@@ -384,7 +383,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
};
struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
- int i, ret = 0;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
if (vpfe_standards[i].std_id & std_id) {
@@ -453,7 +452,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
{
- int ret = 0;
+ int ret;
/* set first input of current subdevice as the current input */
vpfe_dev->current_input = 0;
@@ -469,7 +468,7 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
/* now open the ccdc device to initialize it */
mutex_lock(&ccdc_lock);
- if (NULL == ccdc_dev) {
+ if (!ccdc_dev) {
v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
ret = -ENODEV;
goto unlock;
@@ -511,12 +510,10 @@ static int vpfe_open(struct file *file)
}
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
- if (NULL == fh) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "unable to allocate memory for file handle object\n");
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
return -ENOMEM;
- }
+
/* store pointer to fh in private_data member of file */
file->private_data = fh;
fh->vpfe_dev = vpfe_dev;
@@ -584,7 +581,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
goto clear_intr;
/* only for 6446 this will be applicable */
- if (NULL != ccdc_dev->hw_ops.reset)
+ if (ccdc_dev->hw_ops.reset)
ccdc_dev->hw_ops.reset();
if (field == V4L2_FIELD_NONE) {
@@ -617,9 +614,8 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
* interleaved or separately in memory, reconfigure
* the CCDC memory address
*/
- if (field == V4L2_FIELD_SEQ_TB) {
+ if (field == V4L2_FIELD_SEQ_TB)
vpfe_schedule_bottom_field(vpfe_dev);
- }
goto clear_intr;
}
/*
@@ -824,7 +820,7 @@ static const struct vpfe_pixel_format *
int temp, found;
vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- if (NULL == vpfe_pix_fmt) {
+ if (!vpfe_pix_fmt) {
/*
* use current pixel format in the vpfe device. We
* will find this pix format in the table
@@ -919,8 +915,7 @@ static const struct vpfe_pixel_format *
else
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
- v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
- " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+ v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
pixfmt->bytesperline, pixfmt->sizeimage);
return vpfe_pix_fmt;
@@ -967,7 +962,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
/* Fill in the information about format */
pix_fmt = vpfe_lookup_pix_format(pix);
- if (NULL != pix_fmt) {
+ if (pix_fmt) {
temp_index = fmt->index;
*fmt = pix_fmt->fmtdesc;
fmt->index = temp_index;
@@ -981,7 +976,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
const struct vpfe_pixel_format *pix_fmts;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
@@ -993,8 +988,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
/* Check for valid frame format */
pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
-
- if (NULL == pix_fmts)
+ if (!pix_fmts)
return -EINVAL;
/* store the pixel format in the device object */
@@ -1020,7 +1014,7 @@ static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
- if (NULL == pix_fmts)
+ if (!pix_fmts)
return -EINVAL;
return 0;
}
@@ -1088,12 +1082,11 @@ static int vpfe_enum_input(struct file *file, void *priv,
&subdev,
&index,
inp->index) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
- " for the subdev\n");
+ v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
return -EINVAL;
}
sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
- memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+ *inp = sdinfo->inputs[index];
return 0;
}
@@ -1114,8 +1107,8 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
struct vpfe_subdev_info *sdinfo;
int subdev_index, inp_index;
struct vpfe_route *route;
- u32 input = 0, output = 0;
- int ret = -EINVAL;
+ u32 input, output;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
@@ -1147,6 +1140,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
if (route && sdinfo->can_route) {
input = route->input;
output = route->output;
+ } else {
+ input = 0;
+ output = 0;
}
if (sd)
@@ -1181,7 +1177,7 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
@@ -1200,7 +1196,7 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
@@ -1349,7 +1345,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_fh *fh = file->private_data;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
@@ -1481,7 +1477,7 @@ static int vpfe_streamon(struct file *file, void *priv,
struct vpfe_fh *fh = file->private_data;
struct vpfe_subdev_info *sdinfo;
unsigned long addr;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
@@ -1564,7 +1560,7 @@ static int vpfe_streamoff(struct file *file, void *priv,
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct vpfe_fh *fh = file->private_data;
struct vpfe_subdev_info *sdinfo;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
@@ -1650,7 +1646,7 @@ static int vpfe_s_selection(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
struct v4l2_rect rect = sel->r;
- int ret = 0;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
@@ -1708,7 +1704,7 @@ static long vpfe_param_handler(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *param)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- int ret = 0;
+ int ret;
v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
@@ -1821,7 +1817,7 @@ static int vpfe_probe(struct platform_device *pdev)
struct vpfe_device *vpfe_dev;
struct i2c_adapter *i2c_adap;
struct video_device *vfd;
- int ret = -ENOMEM, i, j;
+ int ret, i, j;
int num_subdevs = 0;
/* Get the pointer to the device object */
@@ -1830,12 +1826,12 @@ static int vpfe_probe(struct platform_device *pdev)
if (!vpfe_dev) {
v4l2_err(pdev->dev.driver,
"Failed to allocate memory for vpfe_dev\n");
- return ret;
+ return -ENOMEM;
}
vpfe_dev->pdev = &pdev->dev;
- if (NULL == pdev->dev.platform_data) {
+ if (!pdev->dev.platform_data) {
v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
ret = -ENODEV;
goto probe_free_dev_mem;
@@ -1843,19 +1839,16 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe_cfg = pdev->dev.platform_data;
vpfe_dev->cfg = vpfe_cfg;
- if (NULL == vpfe_cfg->ccdc ||
- NULL == vpfe_cfg->card_name ||
- NULL == vpfe_cfg->sub_devs) {
+ if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
ret = -ENOENT;
goto probe_free_dev_mem;
}
/* Allocate memory for ccdc configuration */
- ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
- if (NULL == ccdc_cfg) {
- v4l2_err(pdev->dev.driver,
- "Memory allocation failed for ccdc_cfg\n");
+ ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
+ if (!ccdc_cfg) {
+ ret = -ENOMEM;
goto probe_free_dev_mem;
}
@@ -1940,11 +1933,10 @@ static int vpfe_probe(struct platform_device *pdev)
video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
- GFP_KERNEL);
- if (NULL == vpfe_dev->sd) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "unable to allocate memory for subdevice pointers\n");
+ vpfe_dev->sd = kmalloc_array(num_subdevs,
+ sizeof(*vpfe_dev->sd),
+ GFP_KERNEL);
+ if (!vpfe_dev->sd) {
ret = -ENOMEM;
goto probe_out_video_unregister;
}
@@ -1974,6 +1966,7 @@ static int vpfe_probe(struct platform_device *pdev)
v4l2_info(&vpfe_dev->v4l2_dev,
"v4l2 sub device %s register fails\n",
sdinfo->name);
+ ret = -ENXIO;
goto probe_sd_out;
}
}
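
Editor's note: besides the style cleanups, the one behavioural fix in this hunk is the added ret = -ENXIO; without it, a failed sub-device registration could jump to the unwind label while ret still held 0 and the probe would report success. The sketch below shows the general shape of that rule, with every failure branch assigning an error code before the goto; the step_* helper names are hypothetical.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/platform_device.h>

/* Hypothetical helpers standing in for real init steps. */
static int step_one(struct platform_device *pdev) { return 0; }
static bool step_two(struct platform_device *pdev) { return false; }
static void undo_step_one(struct platform_device *pdev) { }

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = step_one(pdev);
	if (ret)
		return ret;

	if (!step_two(pdev)) {
		ret = -ENXIO;	/* set the code before the goto; never fall through with 0 */
		goto err_undo_one;
	}

	return 0;

err_undo_one:
	undo_step_one(pdev);
	return ret;
}
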
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 5104cc0ee40e..f791f5c402bf 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -291,10 +291,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (common->cur_frm != NULL)
+ if (common->cur_frm)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (common->next_frm != NULL)
+ if (common->next_frm)
vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -375,7 +375,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct vpif_device *dev = &vpif_obj;
struct common_obj *common;
struct channel_obj *ch;
- int channel_id = 0;
+ int channel_id;
int fid = -1, i;
channel_id = *(int *)(dev_id);
@@ -648,7 +648,7 @@ static int vpif_input_to_subdev(
vpif_dbg(2, debug, "vpif_input_to_subdev\n");
subdev_name = chan_cfg->inputs[input_index].subdev_name;
- if (subdev_name == NULL)
+ if (!subdev_name)
return -1;
/* loop through the sub device list to get the sub device info */
@@ -731,7 +731,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct video_device *vdev = video_devdata(file);
struct channel_obj *ch = video_get_drvdata(vdev);
- int ret = 0;
+ int ret;
vpif_dbg(2, debug, "vpif_querystd\n");
@@ -764,7 +764,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
vpif_dbg(2, debug, "vpif_g_std\n");
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -794,7 +794,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
vpif_dbg(2, debug, "vpif_s_std\n");
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1050,7 +1050,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1084,7 +1084,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1120,7 +1120,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
struct v4l2_input input;
int ret;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1152,11 +1152,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
timings->bt.vfrontporch &&
(timings->bt.vbackporch ||
timings->bt.vsync))) {
- vpif_dbg(2, debug, "Timings for width, height, "
- "horizontal back porch, horizontal sync, "
- "horizontal front porch, vertical back porch, "
- "vertical sync and vertical back porch "
- "must be defined\n");
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
return -EINVAL;
}
@@ -1181,8 +1177,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
std_info->l11 = std_info->vsize -
(bt->il_vfrontporch - 1);
} else {
- vpif_dbg(2, debug, "Required timing values for "
- "interlaced BT format missing\n");
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
return -EINVAL;
}
} else {
@@ -1218,7 +1213,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
struct vpif_capture_chan_config *chan_cfg;
struct v4l2_input input;
- if (config->chan_config[ch->channel_id].inputs == NULL)
+ if (!config->chan_config[ch->channel_id].inputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1464,10 +1459,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
- vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
- GFP_KERNEL);
- if (vpif_obj.sd == NULL) {
- vpif_err("unable to allocate memory for subdevice pointers\n");
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
err = -ENOMEM;
goto vpif_unregister;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 75b27233ec2f..e5f18448dbf7 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -271,10 +271,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (common->cur_frm != NULL)
+ if (common->cur_frm)
vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (common->next_frm != NULL)
+ if (common->next_frm)
vb2_buffer_done(&common->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -301,7 +301,7 @@ static struct vb2_ops video_qops = {
static void process_progressive_mode(struct common_obj *common)
{
- unsigned long addr = 0;
+ unsigned long addr;
spin_lock(&common->irqlock);
/* Get the next buffer from buffer queue */
@@ -363,7 +363,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct channel_obj *ch;
struct common_obj *common;
int fid = -1, i;
- int channel_id = 0;
+ int channel_id;
channel_id = *(int *)(dev_id);
if (!vpif_intr_status(channel_id + 2))
@@ -686,7 +686,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -732,7 +732,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
struct vpif_display_chan_config *chan_cfg;
struct v4l2_output output;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -783,11 +783,11 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
vpif_dbg(2, debug, "vpif_output_to_subdev\n");
- if (chan_cfg->outputs == NULL)
+ if (!chan_cfg->outputs)
return -1;
subdev_name = chan_cfg->outputs[index].subdev_name;
- if (subdev_name == NULL)
+ if (!subdev_name)
return -1;
/* loop through the sub device list to get the sub device info */
@@ -833,7 +833,7 @@ static int vpif_set_output(struct vpif_display_config *vpif_cfg,
}
ch->output_idx = index;
ch->sd = sd;
- if (chan_cfg->outputs != NULL)
+ if (chan_cfg->outputs)
/* update tvnorms from the sub device output info */
ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std;
return 0;
@@ -885,7 +885,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -922,7 +922,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
struct v4l2_output output;
int ret;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
return -ENODATA;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -954,11 +954,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
timings->bt.vfrontporch &&
(timings->bt.vbackporch ||
timings->bt.vsync))) {
- vpif_dbg(2, debug, "Timings for width, height, "
- "horizontal back porch, horizontal sync, "
- "horizontal front porch, vertical back porch, "
- "vertical sync and vertical back porch "
- "must be defined\n");
+ vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
return -EINVAL;
}
@@ -983,8 +979,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
std_info->l11 = std_info->vsize -
(bt->il_vfrontporch - 1);
} else {
- vpif_dbg(2, debug, "Required timing values for "
- "interlaced BT format missing\n");
+ vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
return -EINVAL;
}
} else {
@@ -1021,7 +1016,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
struct video_obj *vid_ch = &ch->video;
struct v4l2_output output;
- if (config->chan_config[ch->channel_id].outputs == NULL)
+ if (!config->chan_config[ch->channel_id].outputs)
goto error;
chan_cfg = &config->chan_config[ch->channel_id];
@@ -1279,10 +1274,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
subdevdata = vpif_obj.config->subdevinfo;
- vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
- GFP_KERNEL);
- if (vpif_obj.sd == NULL) {
- vpif_err("unable to allocate memory for subdevice pointers\n");
+ vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+ if (!vpif_obj.sd) {
err = -ENOMEM;
goto vpif_unregister;
}
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index fce86f17dffc..373b796132f2 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -261,8 +261,8 @@ static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
shift = 6;
break;
default:
- printk(KERN_ERR "dm355_enable_clock:"
- " Invalid selector: %d\n", clock_sel);
+ printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
+ clock_sel);
return -EINVAL;
}
@@ -421,8 +421,7 @@ static int vpss_probe(struct platform_device *pdev)
else if (!strcmp(platform_name, "dm644x_vpss"))
oper_cfg.platform = DM644X;
else {
- dev_err(&pdev->dev, "vpss driver not supported on"
- " this platform\n");
+ dev_err(&pdev->dev, "vpss driver not supported on this platform\n");
return -ENODEV;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 787bd16c19e5..cbf75b6194b4 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -24,12 +24,11 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <media/v4l2-ioctl.h>
#include "gsc-core.h"
-#define GSC_CLOCK_GATE_NAME "gscl"
-
static const struct gsc_fmt gsc_formats[] = {
{
.name = "RGB565",
@@ -39,8 +38,8 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 1,
}, {
- .name = "XRGB-8-8-8-8, 32 bpp",
- .pixelformat = V4L2_PIX_FMT_RGB32,
+ .name = "BGRX-8-8-8-8, 32 bpp",
+ .pixelformat = V4L2_PIX_FMT_BGR32,
.depth = { 32 },
.color = GSC_RGB,
.num_planes = 1,
@@ -441,7 +440,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
&pix_mp->height, min_h, max_h, mod_y, 0);
if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
- pr_info("Image size has been modified from %dx%d to %dx%d",
+ pr_debug("Image size has been modified from %dx%d to %dx%d\n",
tmp_w, tmp_h, pix_mp->width, pix_mp->height);
pix_mp->num_planes = fmt->num_planes;
@@ -451,12 +450,25 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
else /* SD */
pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
-
for (i = 0; i < pix_mp->num_planes; ++i) {
- int bpl = (pix_mp->width * fmt->depth[i]) >> 3;
- pix_mp->plane_fmt[i].bytesperline = bpl;
- pix_mp->plane_fmt[i].sizeimage = bpl * pix_mp->height;
+ struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
+
+ if (fmt->num_comp == 1 && /* Packed */
+ (bpl == 0 || (bpl * 8 / fmt->depth[i]) < pix_mp->width))
+ bpl = pix_mp->width * fmt->depth[i] / 8;
+
+ if (fmt->num_comp > 1 && /* Planar */
+ (bpl == 0 || bpl < pix_mp->width))
+ bpl = pix_mp->width;
+
+ if (i != 0 && fmt->num_comp == 3)
+ bpl /= 2;
+ plane_fmt->bytesperline = bpl;
+ plane_fmt->sizeimage = max(pix_mp->width * pix_mp->height *
+ fmt->depth[i] / 8,
+ plane_fmt->sizeimage);
pr_debug("[%d]: bpl: %d, sizeimage: %d",
i, bpl, pix_mp->plane_fmt[i].sizeimage);
}
@@ -964,7 +976,19 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
[3] = &gsc_v_100_variant,
},
.num_entities = 4,
- .lclk_frequency = 266000000UL,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_5433_drvdata = {
+ .variant = {
+ [0] = &gsc_v_100_variant,
+ [1] = &gsc_v_100_variant,
+ [2] = &gsc_v_100_variant,
+ },
+ .num_entities = 3,
+ .clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
+ .num_clocks = 4,
};
static const struct of_device_id exynos_gsc_match[] = {
@@ -972,98 +996,22 @@ static const struct of_device_id exynos_gsc_match[] = {
.compatible = "samsung,exynos5-gsc",
.data = &gsc_v_100_drvdata,
},
+ {
+ .compatible = "samsung,exynos5433-gsc",
+ .data = &gsc_5433_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
-static void *gsc_get_drv_data(struct platform_device *pdev)
-{
- struct gsc_driverdata *driver_data = NULL;
- const struct of_device_id *match;
-
- match = of_match_node(exynos_gsc_match, pdev->dev.of_node);
- if (match)
- driver_data = (struct gsc_driverdata *)match->data;
-
- return driver_data;
-}
-
-static void gsc_clk_put(struct gsc_dev *gsc)
-{
- if (!IS_ERR(gsc->clock))
- clk_unprepare(gsc->clock);
-}
-
-static int gsc_clk_get(struct gsc_dev *gsc)
-{
- int ret;
-
- dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
-
- gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
- if (IS_ERR(gsc->clock)) {
- dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
- GSC_CLOCK_GATE_NAME);
- return PTR_ERR(gsc->clock);
- }
-
- ret = clk_prepare(gsc->clock);
- if (ret < 0) {
- dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
- GSC_CLOCK_GATE_NAME);
- gsc->clock = ERR_PTR(-EINVAL);
- return ret;
- }
-
- return 0;
-}
-
-static int gsc_m2m_suspend(struct gsc_dev *gsc)
-{
- unsigned long flags;
- int timeout;
-
- spin_lock_irqsave(&gsc->slock, flags);
- if (!gsc_m2m_pending(gsc)) {
- spin_unlock_irqrestore(&gsc->slock, flags);
- return 0;
- }
- clear_bit(ST_M2M_SUSPENDED, &gsc->state);
- set_bit(ST_M2M_SUSPENDING, &gsc->state);
- spin_unlock_irqrestore(&gsc->slock, flags);
-
- timeout = wait_event_timeout(gsc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &gsc->state),
- GSC_SHUTDOWN_TIMEOUT);
-
- clear_bit(ST_M2M_SUSPENDING, &gsc->state);
- return timeout == 0 ? -EAGAIN : 0;
-}
-
-static int gsc_m2m_resume(struct gsc_dev *gsc)
-{
- struct gsc_ctx *ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&gsc->slock, flags);
- /* Clear for full H/W setup in first run after resume */
- ctx = gsc->m2m.ctx;
- gsc->m2m.ctx = NULL;
- spin_unlock_irqrestore(&gsc->slock, flags);
-
- if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
- gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
-
- return 0;
-}
-
static int gsc_probe(struct platform_device *pdev)
{
struct gsc_dev *gsc;
struct resource *res;
- struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
struct device *dev = &pdev->dev;
+ const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
int ret;
+ int i;
gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
if (!gsc)
@@ -1079,13 +1027,13 @@ static int gsc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ gsc->num_clocks = drv_data->num_clocks;
gsc->variant = drv_data->variant[gsc->id];
gsc->pdev = pdev;
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
- gsc->clock = ERR_PTR(-EINVAL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gsc->regs = devm_ioremap_resource(dev, res);
@@ -1098,9 +1046,25 @@ static int gsc_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = gsc_clk_get(gsc);
- if (ret)
- return ret;
+ for (i = 0; i < gsc->num_clocks; i++) {
+ gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
+ if (IS_ERR(gsc->clock[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ drv_data->clk_names[i]);
+ return PTR_ERR(gsc->clock[i]);
+ }
+ }
+
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ dev_err(dev, "clock prepare failed for clock: %s\n",
+ drv_data->clk_names[i]);
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
+ }
ret = devm_request_irq(dev, res->start, gsc_irq_handler,
0, pdev->name, gsc);
@@ -1118,114 +1082,131 @@ static int gsc_probe(struct platform_device *pdev)
goto err_v4l2;
platform_set_drvdata(pdev, gsc);
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- goto err_m2m;
+
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
- pm_runtime_put(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
-err_m2m:
- gsc_unregister_m2m_device(gsc);
err_v4l2:
v4l2_device_unregister(&gsc->v4l2_dev);
err_clk:
- gsc_clk_put(gsc);
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
return ret;
}
static int gsc_remove(struct platform_device *pdev)
{
struct gsc_dev *gsc = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_get_sync(&pdev->dev);
gsc_unregister_m2m_device(gsc);
v4l2_device_unregister(&gsc->v4l2_dev);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- gsc_clk_put(gsc);
+ for (i = 0; i < gsc->num_clocks; i++)
+ clk_disable_unprepare(gsc->clock[i]);
+
+ pm_runtime_put_noidle(&pdev->dev);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
}
-static int gsc_runtime_resume(struct device *dev)
+#ifdef CONFIG_PM
+static int gsc_m2m_suspend(struct gsc_dev *gsc)
{
- struct gsc_dev *gsc = dev_get_drvdata(dev);
- int ret = 0;
-
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+ unsigned long flags;
+ int timeout;
- ret = clk_enable(gsc->clock);
- if (ret)
- return ret;
+ spin_lock_irqsave(&gsc->slock, flags);
+ if (!gsc_m2m_pending(gsc)) {
+ spin_unlock_irqrestore(&gsc->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &gsc->state);
+ set_bit(ST_M2M_SUSPENDING, &gsc->state);
+ spin_unlock_irqrestore(&gsc->slock, flags);
- gsc_hw_set_sw_reset(gsc);
- gsc_wait_reset(gsc);
+ timeout = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
- return gsc_m2m_resume(gsc);
+ clear_bit(ST_M2M_SUSPENDING, &gsc->state);
+ return timeout == 0 ? -EAGAIN : 0;
}
-static int gsc_runtime_suspend(struct device *dev)
+static void gsc_m2m_resume(struct gsc_dev *gsc)
{
- struct gsc_dev *gsc = dev_get_drvdata(dev);
- int ret = 0;
+ struct gsc_ctx *ctx;
+ unsigned long flags;
- ret = gsc_m2m_suspend(gsc);
- if (!ret)
- clk_disable(gsc->clock);
+ spin_lock_irqsave(&gsc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = gsc->m2m.ctx;
+ gsc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&gsc->slock, flags);
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
- return ret;
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
-static int gsc_resume(struct device *dev)
+static int gsc_runtime_resume(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
- unsigned long flags;
+ int ret = 0;
+ int i;
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
- /* Do not resume if the device was idle before system suspend */
- spin_lock_irqsave(&gsc->slock, flags);
- if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
- !gsc_m2m_opened(gsc)) {
- spin_unlock_irqrestore(&gsc->slock, flags);
- return 0;
+ for (i = 0; i < gsc->num_clocks; i++) {
+ ret = clk_prepare_enable(gsc->clock[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(gsc->clock[i]);
+ return ret;
+ }
}
- spin_unlock_irqrestore(&gsc->slock, flags);
- if (!pm_runtime_suspended(dev))
- return gsc_runtime_resume(dev);
+ gsc_hw_set_sw_reset(gsc);
+ gsc_wait_reset(gsc);
+ gsc_m2m_resume(gsc);
return 0;
}
-static int gsc_suspend(struct device *dev)
+static int gsc_runtime_suspend(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
+ int ret = 0;
+ int i;
- pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
-
- if (test_and_set_bit(ST_SUSPEND, &gsc->state))
- return 0;
+ ret = gsc_m2m_suspend(gsc);
+ if (ret)
+ return ret;
- if (!pm_runtime_suspended(dev))
- return gsc_runtime_suspend(dev);
+ for (i = gsc->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(gsc->clock[i]);
- return 0;
+ pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
+ return ret;
}
+#endif
static const struct dev_pm_ops gsc_pm_ops = {
- .suspend = gsc_suspend,
- .resume = gsc_resume,
- .runtime_suspend = gsc_runtime_suspend,
- .runtime_resume = gsc_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
static struct platform_driver gsc_driver = {
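
Editor's note: the gsc-core.c rework above replaces the single gate clock with an array of named clocks and enables them with an explicit unwind loop, so a failure halfway through the list leaves nothing prepared. A compact sketch of that enable-or-roll-back pattern with illustrative names is shown below; later kernels also offer the clk_bulk_*() helpers, which wrap the same loop.

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch: enable clks[0..num-1], undoing everything on the first failure. */
static int enable_all_clocks(struct device *dev, struct clk **clks, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret) {
			dev_err(dev, "failed to enable clock %d\n", i);
			while (--i >= 0)	/* roll back the ones already enabled */
				clk_disable_unprepare(clks[i]);
			return ret;
		}
	}

	return 0;
}
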
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 7ad7b9dc2243..696217e9af66 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -33,6 +33,7 @@
#define GSC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
#define GSC_MAX_DEVS 4
+#define GSC_MAX_CLOCKS 4
#define GSC_M2M_BUF_NUM 0
#define GSC_MAX_CTRL_NUM 10
#define GSC_SC_ALIGN_4 4
@@ -48,9 +49,6 @@
#define GSC_CTX_ABORT (1 << 7)
enum gsc_dev_flags {
- /* for global */
- ST_SUSPEND,
-
/* for m2m node */
ST_M2M_OPEN,
ST_M2M_RUN,
@@ -306,12 +304,12 @@ struct gsc_variant {
* struct gsc_driverdata - per device type driver data for init time.
*
* @variant: the variant information for this driver.
- * @lclk_frequency: G-Scaler clock frequency
* @num_entities: the number of g-scalers
*/
struct gsc_driverdata {
struct gsc_variant *variant[GSC_MAX_DEVS];
- unsigned long lclk_frequency;
+ const char *clk_names[GSC_MAX_CLOCKS];
+ int num_clocks;
int num_entities;
};
@@ -335,7 +333,8 @@ struct gsc_dev {
struct platform_device *pdev;
struct gsc_variant *variant;
u16 id;
- struct clk *clock;
+ int num_clocks;
+ struct clk *clock[GSC_MAX_CLOCKS];
void __iomem *regs;
wait_queue_head_t irq_queue;
struct gsc_m2m_device m2m;
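
Editor's note: the header now carries the per-variant clock names and count in gsc_driverdata, which gsc_probe() looks up with of_device_get_match_data() instead of open-coding of_match_node(). The sketch below shows how such per-compatible match data is typically wired up; the foo_* names and the "vendor,foo-v1" compatible are hypothetical.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-variant data, in the spirit of gsc_driverdata. */
struct foo_drvdata {
	const char *clk_names[4];
	int num_clocks;
};

static const struct foo_drvdata foo_v1_data = {
	.clk_names = { "gate" },
	.num_clocks = 1,
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matching of_device_id, or NULL. */
	const struct foo_drvdata *drv_data =
		of_device_get_match_data(&pdev->dev);

	if (!drv_data)
		return -EINVAL;

	return 0;
}
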
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 9f03b791b711..f49f24b4462a 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -66,12 +66,29 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
return ret > 0 ? 0 : ret;
}
+static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
struct gsc_ctx *ctx = q->drv_priv;
__gsc_m2m_job_abort(ctx);
+ __gsc_m2m_cleanup_queue(ctx);
+
pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}
@@ -365,14 +382,8 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
- if (reqbufs->count > max_cnt) {
+ if (reqbufs->count > max_cnt)
return -EINVAL;
- } else if (reqbufs->count == 0) {
- if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
- else
- gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
- }
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -766,30 +777,29 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
if (IS_ERR(gsc->m2m.m2m_dev)) {
dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
- ret = PTR_ERR(gsc->m2m.m2m_dev);
- goto err_m2m_r1;
+ return PTR_ERR(gsc->m2m.m2m_dev);
}
ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
dev_err(&pdev->dev,
"%s(): failed to register video device\n", __func__);
- goto err_m2m_r2;
+ goto err_m2m_release;
}
pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
return 0;
-err_m2m_r2:
+err_m2m_release:
v4l2_m2m_release(gsc->m2m.m2m_dev);
-err_m2m_r1:
- video_device_release(gsc->m2m.vfd);
return ret;
}
void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
- if (gsc)
+ if (gsc) {
v4l2_m2m_release(gsc->m2m.m2m_dev);
+ video_unregister_device(&gsc->vdev);
+ }
}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 8f89ca21b631..099c735a39b7 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -736,6 +736,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
for (i = 0; i < pix->num_planes; ++i) {
struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
u32 bpl = plane_fmt->bytesperline;
+ u32 sizeimage;
if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
bpl = pix->width; /* Planar */
@@ -755,8 +756,17 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
bytesperline /= 2;
plane_fmt->bytesperline = bytesperline;
- plane_fmt->sizeimage = max((pix->width * pix->height *
- fmt->depth[i]) / 8, plane_fmt->sizeimage);
+ sizeimage = pix->width * pix->height * fmt->depth[i] / 8;
+
+ /* Ensure full last row for tiled formats */
+ if (tiled_fmt(fmt)) {
+ /* 64 * 32 * plane_fmt->bytesperline / 64 */
+ u32 row_size = plane_fmt->bytesperline * 32;
+
+ sizeimage = roundup(sizeimage, row_size);
+ }
+
+ plane_fmt->sizeimage = max(sizeimage, plane_fmt->sizeimage);
}
}
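
Editor's note: for tiled formats the plane size is now rounded up to a whole row of 64x32 tiles (row_size = bytesperline * 32), so the final row of macroblocks is fully backed by the buffer. A worked example with hypothetical numbers, not taken from the patch:

#include <linux/kernel.h>	/* roundup() */

/* Hypothetical 1920x1080, 8 bpp luma plane, for illustration only. */
static u32 tiled_plane_size_example(void)
{
	u32 bytesperline = 1920;
	u32 row_size = bytesperline * 32;	/* one row of 64x32 tiles = 61440 bytes */
	u32 nominal = 1920 * 1080;		/* 2073600 bytes */

	return roundup(nominal, row_size);	/* 2088960 bytes, i.e. 34 full tile rows */
}
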
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 1a1154a9dfa4..e3a8709138fa 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -938,8 +938,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
csis = fmd->csis[pdata->mux_id].sd;
if (WARN(csis == NULL,
- "MIPI-CSI interface specified "
- "but s5p-csis module is not loaded!\n"))
+ "MIPI-CSI interface specified but s5p-csis module is not loaded!\n"))
return -EINVAL;
pad = sensor->entity.num_pads - 1;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index af59bf4dca2d..a8bda6679422 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -49,24 +49,17 @@
static bool alloc_bufs_at_read;
module_param(alloc_bufs_at_read, bool, 0444);
MODULE_PARM_DESC(alloc_bufs_at_read,
- "Non-zero value causes DMA buffers to be allocated when the "
- "video capture device is read, rather than at module load "
- "time. This saves memory, but decreases the chances of "
- "successfully getting those buffers. This parameter is "
- "only used in the vmalloc buffer mode");
+ "Non-zero value causes DMA buffers to be allocated when the video capture device is read, rather than at module load time. This saves memory, but decreases the chances of successfully getting those buffers. This parameter is only used in the vmalloc buffer mode");
static int n_dma_bufs = 3;
module_param(n_dma_bufs, uint, 0644);
MODULE_PARM_DESC(n_dma_bufs,
- "The number of DMA buffers to allocate. Can be either two "
- "(saves memory, makes timing tighter) or three.");
+ "The number of DMA buffers to allocate. Can be either two (saves memory, makes timing tighter) or three.");
static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
module_param(dma_buf_size, uint, 0444);
MODULE_PARM_DESC(dma_buf_size,
- "The size of the allocated DMA buffers. If actual operating "
- "parameters require larger buffers, an attempt to reallocate "
- "will be made.");
+ "The size of the allocated DMA buffers. If actual operating parameters require larger buffers, an attempt to reallocate will be made.");
#else /* MCAM_MODE_VMALLOC */
static const bool alloc_bufs_at_read;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
@@ -75,15 +68,12 @@ static const int n_dma_bufs = 3; /* Used by S/G_PARM */
static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
+ "If set, the sensor will be instructed to flip the image vertically.");
static int buffer_mode = -1;
module_param(buffer_mode, int, 0444);
MODULE_PARM_DESC(buffer_mode,
- "Set the buffer mode to be used; default is to go with what "
- "the platform driver asks for. Set to 0 for vmalloc, 1 for "
- "DMA contiguous.");
+ "Set the buffer mode to be used; default is to go with what the platform driver asks for. Set to 0 for vmalloc, 1 for DMA contiguous.");
/*
* Status flags. Always manipulated with bit operations.
@@ -1759,8 +1749,7 @@ int mccic_register(struct mcam_camera *cam)
cam->buffer_mode = buffer_mode;
if (cam->buffer_mode == B_DMA_sg &&
cam->chip_id == MCAM_CAFE) {
- printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
- "attempting vmalloc mode instead\n");
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
cam->buffer_mode = B_vmalloc;
}
if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
@@ -1828,8 +1817,7 @@ int mccic_register(struct mcam_camera *cam)
*/
if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
if (mcam_alloc_dma_bufs(cam, 1))
- cam_warn(cam, "Unable to alloc DMA buffers at load"
- " will try again later.");
+ cam_warn(cam, "Unable to alloc DMA buffers at load will try again later.");
}
mutex_unlock(&cam->s_mutex);
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mtk-mdp/Makefile
new file mode 100644
index 000000000000..f8025699af99
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/Makefile
@@ -0,0 +1,9 @@
+mtk-mdp-y += mtk_mdp_core.o
+mtk-mdp-y += mtk_mdp_comp.o
+mtk-mdp-y += mtk_mdp_m2m.o
+mtk-mdp-y += mtk_mdp_regs.o
+mtk-mdp-y += mtk_mdp_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp.o
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
new file mode 100644
index 000000000000..aa8f9fd1f1a2
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_comp.h"
+
+
+static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
+ "mdp_rdma",
+ "mdp_rsz",
+ "mdp_wdma",
+ "mdp_wrot",
+};
+
+struct mtk_mdp_comp_match {
+ enum mtk_mdp_comp_type type;
+ int alias_id;
+};
+
+static const struct mtk_mdp_comp_match mtk_mdp_matches[MTK_MDP_COMP_ID_MAX] = {
+ { MTK_MDP_RDMA, 0 },
+ { MTK_MDP_RDMA, 1 },
+ { MTK_MDP_RSZ, 0 },
+ { MTK_MDP_RSZ, 1 },
+ { MTK_MDP_RSZ, 2 },
+ { MTK_MDP_WDMA, 0 },
+ { MTK_MDP_WROT, 0 },
+ { MTK_MDP_WROT, 1 },
+};
+
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type)
+{
+ int id = of_alias_get_id(node, mtk_mdp_comp_stem[comp_type]);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_matches); i++) {
+ if (comp_type == mtk_mdp_matches[i].type &&
+ id == mtk_mdp_matches[i].alias_id)
+ return i;
+ }
+
+ dev_err(dev, "Failed to get id. type: %d, id: %d\n", comp_type, id);
+
+ return -EINVAL;
+}
+
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i, err;
+
+ if (comp->larb_dev) {
+ err = mtk_smi_larb_get(comp->larb_dev);
+ if (err)
+ dev_err(dev,
+ "failed to get larb, err %d. type:%d id:%d\n",
+ err, comp->type, comp->id);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (!comp->clk[i])
+ continue;
+ err = clk_prepare_enable(comp->clk[i]);
+ if (err)
+ dev_err(dev,
+ "failed to enable clock, err %d. type:%d id:%d i:%d\n",
+ err, comp->type, comp->id, i);
+ }
+}
+
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ if (!comp->clk[i])
+ continue;
+ clk_disable_unprepare(comp->clk[i]);
+ }
+
+ if (comp->larb_dev)
+ mtk_smi_larb_put(comp->larb_dev);
+}
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id)
+{
+ struct device_node *larb_node;
+ struct platform_device *larb_pdev;
+ int i;
+
+ if (comp_id < 0 || comp_id >= MTK_MDP_COMP_ID_MAX) {
+ dev_err(dev, "Invalid comp_id %d\n", comp_id);
+ return -EINVAL;
+ }
+
+ comp->dev_node = of_node_get(node);
+ comp->id = comp_id;
+ comp->type = mtk_mdp_matches[comp_id].type;
+ comp->regs = of_iomap(node, 0);
+
+ for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+ comp->clk[i] = of_clk_get(node, i);
+
+ /* Only RDMA needs two clocks */
+ if (comp->type != MTK_MDP_RDMA)
+ break;
+ }
+
+ /* Only DMA capable components need the LARB property */
+ comp->larb_dev = NULL;
+ if (comp->type != MTK_MDP_RDMA &&
+ comp->type != MTK_MDP_WDMA &&
+ comp->type != MTK_MDP_WROT)
+ return 0;
+
+ larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+ if (!larb_node) {
+ dev_err(dev,
+ "Missing mediadek,larb phandle in %s node\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ larb_pdev = of_find_device_by_node(larb_node);
+ if (!larb_pdev) {
+ dev_warn(dev, "Waiting for larb device %s\n",
+ larb_node->full_name);
+ of_node_put(larb_node);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(larb_node);
+
+ comp->larb_dev = &larb_pdev->dev;
+
+ return 0;
+}
+
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp)
+{
+ of_node_put(comp->dev_node);
+}
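
Editor's note: mtk_mdp_comp_init() above shows the usual handling of a hardware dependency expressed as a phandle: resolve the phandle, and if the provider's platform device has not been registered yet, return -EPROBE_DEFER so the driver core retries the probe later. A stripped-down sketch of that lookup follows; the wrapper function is illustrative, and only the "mediatek,larb" property name is taken from the driver.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_platform.h>

/* Illustrative wrapper: resolve a "mediatek,larb" phandle to its device. */
static struct device *get_larb_dev(struct device *dev, struct device_node *node)
{
	struct device_node *larb_node;
	struct platform_device *larb_pdev;

	larb_node = of_parse_phandle(node, "mediatek,larb", 0);
	if (!larb_node)
		return ERR_PTR(-EINVAL);

	larb_pdev = of_find_device_by_node(larb_node);
	of_node_put(larb_node);
	if (!larb_pdev)
		return ERR_PTR(-EPROBE_DEFER);	/* provider not probed yet, retry later */

	return &larb_pdev->dev;
}
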
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
new file mode 100644
index 000000000000..63b3983ef1a4
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_COMP_H__
+#define __MTK_MDP_COMP_H__
+
+/**
+ * enum mtk_mdp_comp_type - the MDP component
+ * @MTK_MDP_RDMA: Read DMA
+ * @MTK_MDP_RSZ: Resizer
+ * @MTK_MDP_WDMA: Write DMA
+ * @MTK_MDP_WROT: Write DMA with rotation
+ */
+enum mtk_mdp_comp_type {
+ MTK_MDP_RDMA,
+ MTK_MDP_RSZ,
+ MTK_MDP_WDMA,
+ MTK_MDP_WROT,
+ MTK_MDP_COMP_TYPE_MAX,
+};
+
+enum mtk_mdp_comp_id {
+ MTK_MDP_COMP_RDMA0,
+ MTK_MDP_COMP_RDMA1,
+ MTK_MDP_COMP_RSZ0,
+ MTK_MDP_COMP_RSZ1,
+ MTK_MDP_COMP_RSZ2,
+ MTK_MDP_COMP_WDMA,
+ MTK_MDP_COMP_WROT0,
+ MTK_MDP_COMP_WROT1,
+ MTK_MDP_COMP_ID_MAX,
+};
+
+/**
+ * struct mtk_mdp_comp - the MDP's function component data
+ * @dev_node: component device node
+ * @clk: clocks required for component
+ * @regs: Mapped address of component registers.
+ * @larb_dev: SMI device required for component
+ * @type: component type
+ * @id: component ID
+ */
+struct mtk_mdp_comp {
+ struct device_node *dev_node;
+ struct clk *clk[2];
+ void __iomem *regs;
+ struct device *larb_dev;
+ enum mtk_mdp_comp_type type;
+ enum mtk_mdp_comp_id id;
+};
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id);
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp);
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+ enum mtk_mdp_comp_type comp_type);
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp);
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp);
+
+
+#endif /* __MTK_MDP_COMP_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
new file mode 100644
index 000000000000..9e4eb7dcc424
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_vpu.h"
+
+/* MDP debug log level (0-3). 3 shows all the logs. */
+int mtk_mdp_dbg_level;
+EXPORT_SYMBOL(mtk_mdp_dbg_level);
+
+module_param(mtk_mdp_dbg_level, int, 0644);
+
+static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-mdp-rdma",
+ .data = (void *)MTK_MDP_RDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-rsz",
+ .data = (void *)MTK_MDP_RSZ
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wdma",
+ .data = (void *)MTK_MDP_WDMA
+ }, {
+ .compatible = "mediatek,mt8173-mdp-wrot",
+ .data = (void *)MTK_MDP_WROT
+ },
+ { },
+};
+
+static const struct of_device_id mtk_mdp_of_ids[] = {
+ { .compatible = "mediatek,mt8173-mdp", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
+
+static void mtk_mdp_clock_on(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_wdt_worker(struct work_struct *work)
+{
+ struct mtk_mdp_dev *mdp =
+ container_of(work, struct mtk_mdp_dev, wdt_work);
+ struct mtk_mdp_ctx *ctx;
+
+ mtk_mdp_err("Watchdog timeout");
+
+ list_for_each_entry(ctx, &mdp->ctx_list, list) {
+ mtk_mdp_dbg(0, "[%d] Change as state error", ctx->id);
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_CTX_ERROR);
+ }
+}
+
+static void mtk_mdp_reset_handler(void *priv)
+{
+ struct mtk_mdp_dev *mdp = priv;
+
+ queue_work(mdp->wdt_wq, &mdp->wdt_work);
+}
+
+static int mtk_mdp_probe(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node;
+ int i, ret = 0;
+
+ mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return -ENOMEM;
+
+ mdp->id = pdev->id;
+ mdp->pdev = pdev;
+ INIT_LIST_HEAD(&mdp->ctx_list);
+
+ mutex_init(&mdp->lock);
+ mutex_init(&mdp->vpulock);
+
+ /* Iterate over sibling MDP function blocks */
+ for_each_child_of_node(dev->of_node, node) {
+ const struct of_device_id *of_id;
+ enum mtk_mdp_comp_type comp_type;
+ int comp_id;
+ struct mtk_mdp_comp *comp;
+
+ of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_err(dev, "Skipping disabled component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ comp_type = (enum mtk_mdp_comp_type)of_id->data;
+ comp_id = mtk_mdp_comp_get_id(dev, node, comp_type);
+ if (comp_id < 0) {
+ dev_warn(dev, "Skipping unknown component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+ mdp->comp[comp_id] = comp;
+
+ ret = mtk_mdp_comp_init(dev, node, comp, comp_id);
+ if (ret)
+ goto err_comp;
+ }
+
+ mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
+ if (!mdp->job_wq) {
+ dev_err(&pdev->dev, "unable to alloc job workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_job_wq;
+ }
+
+ mdp->wdt_wq = create_singlethread_workqueue("mdp_wdt_wq");
+ if (!mdp->wdt_wq) {
+ dev_err(&pdev->dev, "unable to alloc wdt workqueue\n");
+ ret = -ENOMEM;
+ goto err_alloc_wdt_wq;
+ }
+ INIT_WORK(&mdp->wdt_work, mtk_mdp_wdt_worker);
+
+ ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_dev_register;
+ }
+
+ ret = mtk_mdp_register_m2m_device(mdp);
+ if (ret) {
+ v4l2_err(&mdp->v4l2_dev, "Failed to init mem2mem device\n");
+ goto err_m2m_register;
+ }
+
+ mdp->vpu_dev = vpu_get_plat_device(pdev);
+ vpu_wdt_reg_handler(mdp->vpu_dev, mtk_mdp_reset_handler, mdp,
+ VPU_RST_MDP);
+
+ platform_set_drvdata(pdev, mdp);
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_enable(dev);
+ dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
+
+ return 0;
+
+err_m2m_register:
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+err_dev_register:
+ destroy_workqueue(mdp->wdt_wq);
+
+err_alloc_wdt_wq:
+ destroy_workqueue(mdp->job_wq);
+
+err_alloc_job_wq:
+
+err_comp:
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(dev, mdp->comp[i]);
+
+ dev_dbg(dev, "err %d\n", ret);
+ return ret;
+}
+
+static int mtk_mdp_remove(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int i;
+
+ pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ mtk_mdp_unregister_m2m_device(mdp);
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+ flush_workqueue(mdp->job_wq);
+ destroy_workqueue(mdp->job_wq);
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+ mtk_mdp_comp_deinit(&pdev->dev, mdp->comp[i]);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_suspend(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_off(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_resume(struct device *dev)
+{
+ struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+ mtk_mdp_clock_on(mdp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_mdp_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_suspend(dev);
+}
+
+static int __maybe_unused mtk_mdp_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mtk_mdp_pm_resume(dev);
+}
+
+static const struct dev_pm_ops mtk_mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_mdp_suspend, mtk_mdp_resume)
+ SET_RUNTIME_PM_OPS(mtk_mdp_pm_suspend, mtk_mdp_pm_resume, NULL)
+};
+
+static struct platform_driver mtk_mdp_driver = {
+ .probe = mtk_mdp_probe,
+ .remove = mtk_mdp_remove,
+ .driver = {
+ .name = MTK_MDP_MODULE_NAME,
+ .pm = &mtk_mdp_pm_ops,
+ .of_match_table = mtk_mdp_of_ids,
+ }
+};
+
+module_platform_driver(mtk_mdp_driver);
+
+MODULE_AUTHOR("Houlong Wei <houlong.wei@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek image processor driver");
+MODULE_LICENSE("GPL v2");
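
Editor's note: mtk_mdp_probe() wires the VPU watchdog through a dedicated single-threaded workqueue, so the reset handler only queues work and the recovery itself runs later in process context. The minimal shape of that deferral is sketched below with hypothetical foo_* names.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct workqueue_struct *wdt_wq;
	struct work_struct wdt_work;
};

static void foo_wdt_worker(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, wdt_work);

	/* Heavy recovery work runs here, in process context. */
	(void)foo;
}

/* Called from the notification path: defer, do not recover in place. */
static void foo_wdt_notify(struct foo_dev *foo)
{
	queue_work(foo->wdt_wq, &foo->wdt_work);
}

static int foo_wdt_init(struct foo_dev *foo)
{
	foo->wdt_wq = create_singlethread_workqueue("foo_wdt_wq");
	if (!foo->wdt_wq)
		return -ENOMEM;
	INIT_WORK(&foo->wdt_work, foo_wdt_worker);
	return 0;
}
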
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
new file mode 100644
index 000000000000..ad1cff306efd
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_CORE_H__
+#define __MTK_MDP_CORE_H__
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_mdp_vpu.h"
+#include "mtk_mdp_comp.h"
+
+
+#define MTK_MDP_MODULE_NAME "mtk-mdp"
+
+#define MTK_MDP_SHUTDOWN_TIMEOUT ((100*HZ)/1000) /* 100ms */
+#define MTK_MDP_MAX_CTRL_NUM 10
+
+#define MTK_MDP_FMT_FLAG_OUTPUT BIT(0)
+#define MTK_MDP_FMT_FLAG_CAPTURE BIT(1)
+
+#define MTK_MDP_VPU_INIT BIT(0)
+#define MTK_MDP_SRC_FMT BIT(1)
+#define MTK_MDP_DST_FMT BIT(2)
+#define MTK_MDP_CTX_ERROR BIT(5)
+
+/**
+ * struct mtk_mdp_pix_align - alignment of image
+ * @org_w: source alignment of width
+ * @org_h: source alignment of height
+ * @target_w: dst alignment of width
+ * @target_h: dst alignment of height
+ */
+struct mtk_mdp_pix_align {
+ u16 org_w;
+ u16 org_h;
+ u16 target_w;
+ u16 target_h;
+};
+
+/**
+ * struct mtk_mdp_fmt - the driver's internal color format data
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of logical data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @row_depth: per plane driver's private 'number of bits per pixel per row'
+ * @flags: flags indicating which operation mode the format applies to;
+ *         MTK_MDP_FMT_FLAG_OUTPUT is used in the OUTPUT stream and
+ *         MTK_MDP_FMT_FLAG_CAPTURE in the CAPTURE stream
+ * @align: pointer to a pixel alignment struct, NULL if using default value
+ */
+struct mtk_mdp_fmt {
+ u32 pixelformat;
+ u16 num_planes;
+ u16 num_comp;
+ u8 depth[VIDEO_MAX_PLANES];
+ u8 row_depth[VIDEO_MAX_PLANES];
+ u32 flags;
+ struct mtk_mdp_pix_align *align;
+};
+
+/**
+ * struct mtk_mdp_addr - the image processor physical address set
+ * @addr: address of planes
+ */
+struct mtk_mdp_addr {
+ dma_addr_t addr[MTK_MDP_MAX_NUM_PLANE];
+};
+
+/**
+ * struct mtk_mdp_ctrls - the image processor control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct mtk_mdp_ctrls {
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct mtk_mdp_frame - source/target frame properties
+ * @width: SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @height: SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop: cropped(source)/scaled(destination) size
+ * @payload: image size in bytes (w x h x bpp)
+ * @pitch: bytes per line of image in memory
+ * @addr: image frame buffer physical addresses
+ * @fmt: color format pointer
+ * @alpha: frame's alpha value
+ */
+struct mtk_mdp_frame {
+ u32 width;
+ u32 height;
+ struct v4l2_rect crop;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ unsigned int pitch[VIDEO_MAX_PLANES];
+ struct mtk_mdp_addr addr;
+ const struct mtk_mdp_fmt *fmt;
+ u8 alpha;
+};
+
+/**
+ * struct mtk_mdp_variant - image processor variant information
+ * @pix_max: maximum limit of image size
+ * @pix_min: minimum limit of image size
+ * @pix_align: alignment of image
+ * @h_scale_up_max: maximum scale-up in horizontal
+ * @v_scale_up_max: maximum scale-up in vertical
+ * @h_scale_down_max: maximum scale-down in horizontal
+ * @v_scale_down_max: maximum scale-down in vertical
+ */
+struct mtk_mdp_variant {
+ struct mtk_mdp_pix_limit *pix_max;
+ struct mtk_mdp_pix_limit *pix_min;
+ struct mtk_mdp_pix_align *pix_align;
+ u16 h_scale_up_max;
+ u16 v_scale_up_max;
+ u16 h_scale_down_max;
+ u16 v_scale_down_max;
+};
+
+/**
+ * struct mtk_mdp_dev - abstraction for image processor entity
+ * @lock: the mutex protecting this data structure
+ * @vpulock: the mutex protecting the communication with VPU
+ * @pdev: pointer to the image processor platform device
+ * @variant: the IP variant information
+ * @id: image processor device index (0..MTK_MDP_MAX_DEVS)
+ * @comp: MDP function components
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @ctx_list: list of struct mtk_mdp_ctx
+ * @vdev: video device for image processor driver
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @job_wq: processor work queue
+ * @vpu_dev: VPU platform device
+ * @ctx_num: counter of active MTK MDP context
+ * @id_counter: An integer id given to the next opened context
+ * @wdt_wq: work queue for VPU watchdog
+ * @wdt_work: worker for VPU watchdog
+ */
+struct mtk_mdp_dev {
+ struct mutex lock;
+ struct mutex vpulock;
+ struct platform_device *pdev;
+ struct mtk_mdp_variant *variant;
+ u16 id;
+ struct mtk_mdp_comp *comp[MTK_MDP_COMP_ID_MAX];
+ struct v4l2_m2m_dev *m2m_dev;
+ struct list_head ctx_list;
+ struct video_device *vdev;
+ struct v4l2_device v4l2_dev;
+ struct workqueue_struct *job_wq;
+ struct platform_device *vpu_dev;
+ int ctx_num;
+ unsigned long id_counter;
+ struct workqueue_struct *wdt_wq;
+ struct work_struct wdt_work;
+};
+
+/**
+ * struct mtk_mdp_ctx - the device context data
+ * @list: link to ctx_list of mtk_mdp_dev
+ * @s_frame: source frame properties
+ * @d_frame: destination frame properties
+ * @id: index of the context that this structure describes
+ * @flags: additional flags for image conversion
+ * @state: flags to keep track of user configuration; protected by slock
+ * @rotation: rotates the image by specified angle
+ * @hflip: mirror the picture horizontally
+ * @vflip: mirror the picture vertically
+ * @mdp_dev: the image processor device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @ctrls: image processor control set
+ * @ctrls_rdy: true if the control handler is initialized
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @quant: enum v4l2_quantization, colorspace quantization
+ * @vpu: VPU instance
+ * @slock: the mutex protecting mtk_mdp_ctx.state
+ * @work: worker for image processing
+ */
+struct mtk_mdp_ctx {
+ struct list_head list;
+ struct mtk_mdp_frame s_frame;
+ struct mtk_mdp_frame d_frame;
+ u32 flags;
+ u32 state;
+ int id;
+ int rotation;
+ u32 hflip:1;
+ u32 vflip:1;
+ struct mtk_mdp_dev *mdp_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mtk_mdp_ctrls ctrls;
+ bool ctrls_rdy;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+
+ struct mtk_mdp_vpu vpu;
+ struct mutex slock;
+ struct work_struct work;
+};
+
+extern int mtk_mdp_dbg_level;
+
+#if defined(DEBUG)
+
+#define mtk_mdp_dbg(level, fmt, args...) \
+ do { \
+ if (mtk_mdp_dbg_level >= level) \
+ pr_info("[MTK_MDP] level=%d %s(),%d: " fmt "\n", \
+ level, __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mtk_mdp_err(fmt, args...) \
+ pr_err("[MTK_MDP][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+#define mtk_mdp_dbg_enter() mtk_mdp_dbg(3, "+")
+#define mtk_mdp_dbg_leave() mtk_mdp_dbg(3, "-")
+
+#else
+
+#define mtk_mdp_dbg(level, fmt, args...) do {} while (0)
+#define mtk_mdp_err(fmt, args...) do {} while (0)
+#define mtk_mdp_dbg_enter() do {} while (0)
+#define mtk_mdp_dbg_leave() do {} while (0)
+
+#endif
+
+#endif /* __MTK_MDP_CORE_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
new file mode 100644
index 000000000000..78e2cc0dead1
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_IPI_H__
+#define __MTK_MDP_IPI_H__
+
+#define MTK_MDP_MAX_NUM_PLANE 3
+
+enum mdp_ipi_msgid {
+ AP_MDP_INIT = 0xd000,
+ AP_MDP_DEINIT = 0xd001,
+ AP_MDP_PROCESS = 0xd002,
+
+ VPU_MDP_INIT_ACK = 0xe000,
+ VPU_MDP_DEINIT_ACK = 0xe001,
+ VPU_MDP_PROCESS_ACK = 0xe002
+};
+
+#pragma pack(push, 4)
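+/*
+ * The structures below are exchanged with the VPU over IPI, so a fixed
+ * 4-byte packing keeps their layout independent of the AP compiler's
+ * default alignment.
+ */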
+
+/**
+ * struct mdp_ipi_init - for AP_MDP_INIT
+ * @msg_id : AP_MDP_INIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ */
+struct mdp_ipi_init {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+};
+
+/**
+ * struct mdp_ipi_comm - for AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @msg_id : AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ */
+struct mdp_ipi_comm {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct mdp_ipi_comm_ack - for VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @msg_id : VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @ipi_id : IPI_MDP
+ * @ap_inst : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ * @status : VPU execution result
+ */
+struct mdp_ipi_comm_ack {
+ uint32_t msg_id;
+ uint32_t ipi_id;
+ uint64_t ap_inst;
+ uint32_t vpu_inst_addr;
+ int32_t status;
+};
+
+/**
+ * struct mdp_config - configured for source/destination image
+ * @x : left
+ * @y : top
+ * @w : width
+ * @h : height
+ * @w_stride : horizontal stride in bytes
+ * @h_stride : vertical stride in bytes
+ * @crop_x : cropped left
+ * @crop_y : cropped top
+ * @crop_w : cropped width
+ * @crop_h : cropped height
+ * @format : color format
+ */
+struct mdp_config {
+ int32_t x;
+ int32_t y;
+ int32_t w;
+ int32_t h;
+ int32_t w_stride;
+ int32_t h_stride;
+ int32_t crop_x;
+ int32_t crop_y;
+ int32_t crop_w;
+ int32_t crop_h;
+ int32_t format;
+};
+
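+/**
+ * struct mdp_buffer - frame buffer description shared with the VPU
+ * @addr_mva : DMA address of each plane
+ * @plane_size : size in bytes of each plane
+ * @plane_num : number of valid planes
+ */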
+struct mdp_buffer {
+ uint64_t addr_mva[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_size[MTK_MDP_MAX_NUM_PLANE];
+ int32_t plane_num;
+};
+
+struct mdp_config_misc {
+ int32_t orientation; /* 0, 90, 180, 270 */
+ int32_t hflip; /* 1 will enable the flip */
+ int32_t vflip; /* 1 will enable the flip */
+ int32_t alpha; /* global alpha */
+};
+
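+/**
+ * struct mdp_process_vsi - per-job shared information passed to the VPU
+ * @src_config : source image configuration
+ * @src_buffer : source buffer addresses and sizes
+ * @dst_config : destination image configuration
+ * @dst_buffer : destination buffer addresses and sizes
+ * @misc : rotation, flip and global alpha settings
+ */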
+struct mdp_process_vsi {
+ struct mdp_config src_config;
+ struct mdp_buffer src_buffer;
+ struct mdp_config dst_config;
+ struct mdp_buffer dst_buffer;
+ struct mdp_config_misc misc;
+};
+
+#pragma pack(pop)
+
+#endif /* __MTK_MDP_IPI_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
new file mode 100644
index 000000000000..13afe48b9dc5
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -0,0 +1,1286 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_mdp_regs.h"
+#include "mtk_vpu.h"
+
+
+/**
+ * struct mtk_mdp_pix_limit - image pixel size limits
+ * @org_w: source pixel width
+ * @org_h: source pixel height
+ * @target_rot_dis_w: destination scaled width in pixels when the rotator is off
+ * @target_rot_dis_h: destination scaled height in pixels when the rotator is off
+ * @target_rot_en_w: destination scaled width in pixels when the rotator is on
+ * @target_rot_en_h: destination scaled height in pixels when the rotator is on
+ */
+struct mtk_mdp_pix_limit {
+ u16 org_w;
+ u16 org_h;
+ u16 target_rot_dis_w;
+ u16 target_rot_dis_h;
+ u16 target_rot_en_w;
+ u16 target_rot_en_h;
+};
+
+static struct mtk_mdp_pix_align mtk_mdp_size_align = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static const struct mtk_mdp_fmt mtk_mdp_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MT21C,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .align = &mtk_mdp_size_align,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .flags = MTK_MDP_FMT_FLAG_OUTPUT |
+ MTK_MDP_FMT_FLAG_CAPTURE,
+ }
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_max = {
+ .target_rot_dis_w = 4096,
+ .target_rot_dis_h = 4096,
+ .target_rot_en_w = 4096,
+ .target_rot_en_h = 4096,
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_min = {
+ .org_w = 16,
+ .org_h = 16,
+ .target_rot_dis_w = 16,
+ .target_rot_dis_h = 16,
+ .target_rot_en_w = 16,
+ .target_rot_en_h = 16,
+};
+
+/* align size for normal raster scan pixel format */
+static struct mtk_mdp_pix_align mtk_mdp_rs_align = {
+ .org_w = 2,
+ .org_h = 2,
+ .target_w = 2,
+ .target_h = 2,
+};
+
+static struct mtk_mdp_variant mtk_mdp_default_variant = {
+ .pix_max = &mtk_mdp_size_max,
+ .pix_min = &mtk_mdp_size_min,
+ .pix_align = &mtk_mdp_rs_align,
+ .h_scale_up_max = 32,
+ .v_scale_up_max = 32,
+ .h_scale_down_max = 32,
+ .v_scale_down_max = 128,
+};
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt(u32 pixelformat, u32 type)
+{
+ u32 i, flag;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (mtk_mdp_formats[i].pixelformat == pixelformat)
+ return &mtk_mdp_formats[i];
+ }
+ return NULL;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt_by_index(u32 index, u32 type)
+{
+ u32 i, flag, num = 0;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+ MTK_MDP_FMT_FLAG_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+ if (!(mtk_mdp_formats[i].flags & flag))
+ continue;
+ if (index == num)
+ return &mtk_mdp_formats[i];
+ num++;
+ }
+ return NULL;
+}
+
+static void mtk_mdp_bound_align_image(u32 *w, unsigned int wmin,
+ unsigned int wmax, unsigned int align_w,
+ u32 *h, unsigned int hmin,
+ unsigned int hmax, unsigned int align_h)
+{
+ int org_w, org_h, step_w, step_h;
+ int walign, halign;
+
+ org_w = *w;
+ org_h = *h;
+ walign = ffs(align_w) - 1;
+ halign = ffs(align_h) - 1;
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
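+ /*
+ * v4l_bound_align_image() may round the size down; if it did and there
+ * is still room below the maximum, step back up by one alignment unit
+ * so the result stays as close to the requested size as possible.
+ */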
+ step_w = 1 << walign;
+ step_h = 1 << halign;
+ if (*w < org_w && (*w + step_w) <= wmax)
+ *w += step_w;
+ if (*h < org_h && (*h + step_h) <= hmax)
+ *h += step_h;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ u32 max_w, max_h, align_w, align_h;
+ u32 min_w, min_h, org_w, org_h;
+ int i;
+
+ fmt = mtk_mdp_find_fmt(pix_mp->pixelformat, f->type);
+ if (!fmt)
+ fmt = mtk_mdp_find_fmt_by_index(0, f->type);
+ if (!fmt) {
+ dev_dbg(&ctx->mdp_dev->pdev->dev,
+ "pixelformat format 0x%X invalid\n",
+ pix_mp->pixelformat);
+ return NULL;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = fmt->pixelformat;
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+
+ max_w = variant->pix_max->target_rot_dis_w;
+ max_h = variant->pix_max->target_rot_dis_h;
+
+ if (fmt->align == NULL) {
+ /* use default alignment */
+ align_w = variant->pix_align->org_w;
+ align_h = variant->pix_align->org_h;
+ } else {
+ align_w = fmt->align->org_w;
+ align_h = fmt->align->org_h;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ min_w = variant->pix_min->org_w;
+ min_h = variant->pix_min->org_h;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, wxh:%ux%u, align:%ux%u, max:%ux%u",
+ ctx->id, f->type, pix_mp->width, pix_mp->height,
+ align_w, align_h, max_w, max_h);
+ /*
+ * Remember the requested size so we can report whether it had to be
+ * adjusted to fit the hardware limits.
+ */
+ org_w = pix_mp->width;
+ org_h = pix_mp->height;
+
+ mtk_mdp_bound_align_image(&pix_mp->width, min_w, max_w, align_w,
+ &pix_mp->height, min_h, max_h, align_h);
+
+ if (org_w != pix_mp->width || org_h != pix_mp->height)
+ mtk_mdp_dbg(1, "[%d] size change:%ux%u to %ux%u", ctx->id,
+ org_w, org_h, pix_mp->width, pix_mp->height);
+ pix_mp->num_planes = fmt->num_planes;
+
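+ /*
+ * bytesperline is derived from the per-row bit depth and sizeimage
+ * from the per-plane bit depth; a larger sizeimage supplied by user
+ * space is kept as-is.
+ */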
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ int bpl = (pix_mp->width * fmt->row_depth[i]) / 8;
+ int sizeimage = (pix_mp->width * pix_mp->height *
+ fmt->depth[i]) / 8;
+
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+ if (pix_mp->plane_fmt[i].sizeimage < sizeimage)
+ pix_mp->plane_fmt[i].sizeimage = sizeimage;
+ memset(pix_mp->plane_fmt[i].reserved, 0,
+ sizeof(pix_mp->plane_fmt[i].reserved));
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%u (%u)", ctx->id,
+ i, bpl, pix_mp->plane_fmt[i].sizeimage, sizeimage);
+ }
+
+ return fmt;
+}
+
+static struct mtk_mdp_frame *mtk_mdp_ctx_get_frame(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->s_frame;
+ return &ctx->d_frame;
+}
+
+static void mtk_mdp_check_crop_change(u32 new_w, u32 new_h, u32 *w, u32 *h)
+{
+ if (new_w != *w || new_h != *h) {
+ mtk_mdp_dbg(1, "size change:%dx%d to %dx%d",
+ *w, *h, new_w, new_h);
+
+ *w = new_w;
+ *h = new_h;
+ }
+}
+
+static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
+ struct v4l2_rect *r)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 align_w, align_h, new_w, new_h;
+ u32 min_w, min_h, max_w, max_h;
+
+ if (r->top < 0 || r->left < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ mtk_mdp_dbg(2, "[%d] type:%d, set wxh:%dx%d", ctx->id, type,
+ r->width, r->height);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, type);
+ max_w = frame->width;
+ max_h = frame->height;
+ new_w = r->width;
+ new_h = r->height;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ align_w = 1;
+ align_h = 1;
+ min_w = 64;
+ min_h = 32;
+ } else {
+ align_w = variant->pix_align->target_w;
+ align_h = variant->pix_align->target_h;
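+ /*
+ * With 90/270 degree rotation the capture rectangle is rotated
+ * relative to the source frame, so swap the limits and the
+ * requested width/height before aligning.
+ */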
+ if (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270) {
+ max_w = frame->height;
+ max_h = frame->width;
+ min_w = variant->pix_min->target_rot_en_w;
+ min_h = variant->pix_min->target_rot_en_h;
+ new_w = r->height;
+ new_h = r->width;
+ } else {
+ min_w = variant->pix_min->target_rot_dis_w;
+ min_h = variant->pix_min->target_rot_dis_h;
+ }
+ }
+
+ mtk_mdp_dbg(2, "[%d] align:%dx%d, min:%dx%d, new:%dx%d", ctx->id,
+ align_w, align_h, min_w, min_h, new_w, new_h);
+
+ mtk_mdp_bound_align_image(&new_w, min_w, max_w, align_w,
+ &new_h, min_h, max_h, align_h);
+
+ if (!V4L2_TYPE_IS_OUTPUT(type) &&
+ (ctx->ctrls.rotate->val == 90 ||
+ ctx->ctrls.rotate->val == 270))
+ mtk_mdp_check_crop_change(new_h, new_w,
+ &r->width, &r->height);
+ else
+ mtk_mdp_check_crop_change(new_w, new_h,
+ &r->width, &r->height);
+
+ /* adjust left/top if the cropping rectangle is out of bounds */
+ /* need to align the left value to a multiple of 2 */
+ if (r->left + new_w > max_w)
+ r->left = max_w - new_w;
+ if (r->top + new_h > max_h)
+ r->top = max_h - new_h;
+
+ if (r->left & 1)
+ r->left -= 1;
+
+ mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+ r->left, r->top, r->width,
+ r->height, max_w, max_h);
+ return 0;
+}
+
+static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_mdp_ctx, fh);
+}
+
+static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_mdp_ctx, ctrl_handler);
+}
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state |= state;
+ mutex_unlock(&ctx->slock);
+}
+
+static void mtk_mdp_ctx_state_lock_clear(struct mtk_mdp_ctx *ctx, u32 state)
+{
+ mutex_lock(&ctx->slock);
+ ctx->state &= ~state;
+ mutex_unlock(&ctx->slock);
+}
+
+static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
+{
+ bool ret;
+
+ mutex_lock(&ctx->slock);
+ ret = (ctx->state & mask) == mask;
+ mutex_unlock(&ctx->slock);
+ return ret;
+}
+
+static void mtk_mdp_ctx_lock(struct vb2_queue *vq)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_lock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_ctx_unlock(struct vb2_queue *vq)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
+ int height)
+{
+ frame->width = width;
+ frame->height = height;
+ frame->crop.width = width;
+ frame->crop.height = height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+}
+
+static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
+ if (ret < 0)
+ mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
+ ctx->id, ret);
+
+ return 0;
+}
+
+static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ else
+ return v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_mdp_ctx *ctx = q->drv_priv;
+ struct vb2_buffer *vb;
+
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ while (vb != NULL) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+ vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+ }
+
+ pm_runtime_put(&ctx->mdp_dev->pdev->dev);
+}
+
+static void mtk_mdp_m2m_job_abort(void *priv)
+{
+}
+
+/* The color format (num_planes) must already be configured. */
+static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
+ struct vb2_buffer *vb,
+ struct mtk_mdp_frame *frame,
+ struct mtk_mdp_addr *addr)
+{
+ u32 pix_size, planes, i;
+
+ pix_size = frame->width * frame->height;
+ planes = min_t(u32, frame->fmt->num_planes, ARRAY_SIZE(addr->addr));
+ for (i = 0; i < planes; i++)
+ addr->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
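+ /*
+ * Single-buffer YV12 carries all three planes in one allocation:
+ * the first chroma plane starts right after the luma plane and the
+ * second a quarter of the luma size after that.
+ */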
+ if (planes == 1) {
+ if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) {
+ addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+ addr->addr[2] = (dma_addr_t)(addr->addr[1] +
+ (pix_size >> 2));
+ } else {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Invalid pixelformat:0x%x\n",
+ frame->fmt->pixelformat);
+ }
+ }
+ mtk_mdp_dbg(3, "[%d] planes:%d, size:%d, addr:%p,%p,%p",
+ ctx->id, planes, pix_size, (void *)addr->addr[0],
+ (void *)addr->addr[1], (void *)addr->addr[2]);
+}
+
+static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *s_frame, *d_frame;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+}
+
+static void mtk_mdp_process_done(void *priv, int vb_state)
+{
+ struct mtk_mdp_dev *mdp = priv;
+ struct mtk_mdp_ctx *ctx;
+ struct vb2_buffer *src_vb, *dst_vb;
+ struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL;
+
+ ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev);
+ if (!ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ src_vbuf = to_vb2_v4l2_buffer(src_vb);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+
+ dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+ dst_vbuf->timecode = src_vbuf->timecode;
+ dst_vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vbuf->flags |= src_vbuf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vbuf, vb_state);
+ v4l2_m2m_buf_done(dst_vbuf, vb_state);
+ v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_worker(struct work_struct *work)
+{
+ struct mtk_mdp_ctx *ctx =
+ container_of(work, struct mtk_mdp_ctx, work);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ int ret;
+
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_CTX_ERROR)) {
+ dev_err(&mdp->pdev->dev, "ctx is in error state");
+ goto worker_end;
+ }
+
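+ /*
+ * Fill in the shared VPU structure (buffer addresses, sizes, formats,
+ * rotation and alpha), then ask the VPU to run the job.
+ */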
+ mtk_mdp_m2m_get_bufs(ctx);
+
+ mtk_mdp_hw_set_input_addr(ctx, &ctx->s_frame.addr);
+ mtk_mdp_hw_set_output_addr(ctx, &ctx->d_frame.addr);
+
+ mtk_mdp_hw_set_in_size(ctx);
+ mtk_mdp_hw_set_in_image_format(ctx);
+
+ mtk_mdp_hw_set_out_size(ctx);
+ mtk_mdp_hw_set_out_image_format(ctx);
+
+ mtk_mdp_hw_set_rotation(ctx);
+ mtk_mdp_hw_set_global_alpha(ctx);
+
+ ret = mtk_mdp_vpu_process(&ctx->vpu);
+ if (ret) {
+ dev_err(&mdp->pdev->dev, "processing failed: %d", ret);
+ goto worker_end;
+ }
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+worker_end:
+ mtk_mdp_process_done(mdp, buf_state);
+}
+
+static void mtk_mdp_m2m_device_run(void *priv)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+
+ queue_work(ctx->mdp_dev->job_wq, &ctx->work);
+}
+
+static int mtk_mdp_m2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vq->type);
+ *num_planes = frame->fmt->num_planes;
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ sizes[i] = frame->payload[i];
+ mtk_mdp_dbg(2, "[%d] type:%d, planes:%d, buffers:%d, size:%u,%u",
+ ctx->id, vq->type, *num_planes, *num_buffers,
+ sizes[0], sizes[1]);
+ return 0;
+}
+
+static int mtk_mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_mdp_frame *frame;
+ int i;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, vb->vb2_queue->type);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+ }
+
+ return 0;
+}
+
+static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static struct vb2_ops mtk_mdp_m2m_qops = {
+ .queue_setup = mtk_mdp_m2m_queue_setup,
+ .buf_prepare = mtk_mdp_m2m_buf_prepare,
+ .buf_queue = mtk_mdp_m2m_buf_queue,
+ .wait_prepare = mtk_mdp_ctx_unlock,
+ .wait_finish = mtk_mdp_ctx_lock,
+ .stop_streaming = mtk_mdp_m2m_stop_streaming,
+ .start_streaming = mtk_mdp_m2m_start_streaming,
+};
+
+static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ strlcpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, mdp->pdev->name, sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:mt8173", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
+{
+ const struct mtk_mdp_fmt *fmt;
+
+ fmt = mtk_mdp_find_fmt_by_index(f->index, type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ pix_mp = &f->fmt.pix_mp;
+
+ pix_mp->width = frame->width;
+ pix_mp->height = frame->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = frame->fmt->pixelformat;
+ pix_mp->num_planes = frame->fmt->num_planes;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quant;
+ mtk_mdp_dbg(2, "[%d] wxh:%dx%d", ctx->id,
+ pix_mp->width, pix_mp->height);
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ pix_mp->plane_fmt[i].bytesperline = (frame->width *
+ frame->fmt->row_depth[i]) / 8;
+ pix_mp->plane_fmt[i].sizeimage = (frame->width *
+ frame->height * frame->fmt->depth[i]) / 8;
+
+ mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%d", ctx->id, i,
+ pix_mp->plane_fmt[i].bytesperline,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (!mtk_mdp_try_fmt_mplane(ctx, f))
+ return -EINVAL;
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct mtk_mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+ const struct mtk_mdp_fmt *fmt;
+ int i;
+
+ mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+ frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+ fmt = mtk_mdp_try_fmt_mplane(ctx, f);
+ if (!fmt) {
+ mtk_mdp_err("[%d] try_fmt failed, type:%d", ctx->id, f->type);
+ return -EINVAL;
+ }
+ frame->fmt = fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_info(&ctx->mdp_dev->pdev->dev, "queue %d busy", f->type);
+ return -EBUSY;
+ }
+
+ pix_mp = &f->fmt.pix_mp;
+ for (i = 0; i < frame->fmt->num_planes; i++) {
+ frame->payload[i] = pix_mp->plane_fmt[i].sizeimage;
+ frame->pitch[i] = pix_mp->plane_fmt[i].bytesperline;
+ }
+
+ mtk_mdp_set_frame_size(frame, pix_mp->width, pix_mp->height);
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quant = pix_mp->quantization;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_DST_FMT);
+
+ mtk_mdp_dbg(2, "[%d] type:%d, frame:%dx%d", ctx->id, f->type,
+ frame->width, frame->height);
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+ if (reqbufs->count == 0) {
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_SRC_FMT);
+ else
+ mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_DST_FMT);
+ }
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_SRC_FMT))
+ return -EINVAL;
+ } else if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT)) {
+ return -EINVAL;
+ }
+
+ if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
+ ret = mtk_mdp_vpu_init(&ctx->vpu);
+ if (ret < 0) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu init failed %d\n",
+ ret);
+ return -EINVAL;
+ }
+ mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_VPU_INIT);
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static inline bool mtk_mdp_is_target_compose(u32 target)
+{
+ if (target == V4L2_SEL_TGT_COMPOSE_DEFAULT
+ || target == V4L2_SEL_TGT_COMPOSE_BOUNDS
+ || target == V4L2_SEL_TGT_COMPOSE)
+ return true;
+ return false;
+}
+
+static inline bool mtk_mdp_is_target_crop(u32 target)
+{
+ if (target == V4L2_SEL_TGT_CROP_DEFAULT
+ || target == V4L2_SEL_TGT_CROP_BOUNDS
+ || target == V4L2_SEL_TGT_CROP)
+ return true;
+ return false;
+}
+
+static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (mtk_mdp_is_target_compose(s->target))
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (mtk_mdp_is_target_crop(s->target))
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ frame = mtk_mdp_ctx_get_frame(ctx, s->type);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = frame->crop.left;
+ s->r.top = frame->crop.top;
+ s->r.width = frame->crop.width;
+ s->r.height = frame->crop.height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
+ int src_h, int dst_w, int dst_h, int rot)
+{
+ int tmp_w, tmp_h;
+
+ if (rot == 90 || rot == 270) {
+ tmp_w = dst_h;
+ tmp_h = dst_w;
+ } else {
+ tmp_w = dst_w;
+ tmp_h = dst_h;
+ }
+
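+ /* Reject requests whose down- or up-scaling ratio exceeds the limits. */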
+ if ((src_w / tmp_w) > var->h_scale_down_max ||
+ (src_h / tmp_h) > var->v_scale_down_max ||
+ (tmp_w / src_w) > var->h_scale_up_max ||
+ (tmp_h / src_h) > var->v_scale_up_max)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mtk_mdp_frame *frame;
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_rect new_r;
+ struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
+ int ret;
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ valid = true;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (s->target == V4L2_SEL_TGT_CROP)
+ valid = true;
+ }
+ if (!valid) {
+ mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+ s->target);
+ return -EINVAL;
+ }
+
+ new_r = s->r;
+ ret = mtk_mdp_try_crop(ctx, s->type, &new_r);
+ if (ret)
+ return ret;
+
+ if (mtk_mdp_is_target_crop(s->target))
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ /* Check whether the scaling ratio is within the supported range */
+ if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT)) {
+ if (V4L2_TYPE_IS_OUTPUT(s->type)) {
+ ret = mtk_mdp_check_scaler_ratio(variant, new_r.width,
+ new_r.height, ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+ } else {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, new_r.width,
+ new_r.height, ctx->ctrls.rotate->val);
+ }
+
+ if (ret) {
+ dev_info(&ctx->mdp_dev->pdev->dev,
+ "Out of scaler range");
+ return -EINVAL;
+ }
+ }
+
+ s->r = new_r;
+ frame->crop = new_r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mtk_mdp_m2m_ioctl_ops = {
+ .vidioc_querycap = mtk_mdp_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_mdp_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_mdp_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_mdp_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = mtk_mdp_m2m_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = mtk_mdp_m2m_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = mtk_mdp_m2m_g_selection,
+ .vidioc_s_selection = mtk_mdp_m2m_s_selection
+};
+
+static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_mdp_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &mtk_mdp_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &mtk_mdp_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int mtk_mdp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_mdp_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_variant *variant = mdp->variant;
+ u32 state = MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ if (mtk_mdp_ctx_state_is_set(ctx, state)) {
+ ret = mtk_mdp_check_scaler_ratio(variant,
+ ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width,
+ ctx->d_frame.crop.height,
+ ctx->ctrls.rotate->val);
+
+ if (ret)
+ return -EINVAL;
+ }
+
+ ctx->rotation = ctrl->val;
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mtk_mdp_ctrl_ops = {
+ .s_ctrl = mtk_mdp_s_ctrl,
+};
+
+static int mtk_mdp_ctrls_create(struct mtk_mdp_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, MTK_MDP_MAX_CTRL_NUM);
+
+ ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mtk_mdp_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 255, 1, 0);
+ ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Failed to create control handlers\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void mtk_mdp_set_default_params(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+ struct mtk_mdp_frame *frame;
+
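+ /*
+ * Default to the first OUTPUT and CAPTURE entries of mtk_mdp_formats;
+ * both are two-plane 4:2:0 layouts, so the chroma payload is half the
+ * luma payload.
+ */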
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ frame->width = mdp->variant->pix_min->org_w;
+ frame->height = mdp->variant->pix_min->org_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+
+ frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->fmt = mtk_mdp_find_fmt_by_index(0,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ frame->width = mdp->variant->pix_min->target_rot_dis_w;
+ frame->height = mdp->variant->pix_min->target_rot_dis_h;
+ frame->payload[0] = frame->width * frame->height;
+ frame->payload[1] = frame->payload[0] / 2;
+
+}
+
+static int mtk_mdp_m2m_open(struct file *file)
+{
+ struct mtk_mdp_dev *mdp = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct mtk_mdp_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&mdp->lock)) {
+ ret = -ERESTARTSYS;
+ goto err_lock;
+ }
+
+ mutex_init(&ctx->slock);
+ ctx->id = mdp->id_counter++;
+ v4l2_fh_init(&ctx->fh, vfd);
+ file->private_data = &ctx->fh;
+ ret = mtk_mdp_ctrls_create(ctx);
+ if (ret)
+ goto error_ctrls;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+
+ ctx->mdp_dev = mdp;
+ mtk_mdp_set_default_params(ctx);
+
+ INIT_WORK(&ctx->work, mtk_mdp_m2m_worker);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx,
+ mtk_mdp_m2m_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ dev_err(&mdp->pdev->dev, "Failed to initialize m2m context");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_ctx;
+ }
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
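+ /*
+ * The VPU firmware is loaded and the MDP IPI handler registered only
+ * when the first context is opened; later contexts reuse them.
+ */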
+ if (mdp->ctx_num++ == 0) {
+ ret = vpu_load_firmware(mdp->vpu_dev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "vpu_load_firmware failed %d\n", ret);
+ goto err_load_vpu;
+ }
+
+ ret = mtk_mdp_vpu_register(mdp->pdev);
+ if (ret < 0) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu register failed %d\n", ret);
+ goto err_load_vpu;
+ }
+ }
+
+ list_add(&ctx->list, &mdp->ctx_list);
+ mutex_unlock(&mdp->lock);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ return 0;
+
+err_load_vpu:
+ mdp->ctx_num--;
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+error_m2m_ctx:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+error_ctrls:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&mdp->lock);
+err_lock:
+ kfree(ctx);
+
+ return ret;
+}
+
+static int mtk_mdp_m2m_release(struct file *file)
+{
+ struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+ flush_workqueue(mdp->job_wq);
+ mutex_lock(&mdp->lock);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mtk_mdp_vpu_deinit(&ctx->vpu);
+ mdp->ctx_num--;
+ list_del_init(&ctx->list);
+
+ mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+ mutex_unlock(&mdp->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_mdp_m2m_open,
+ .release = mtk_mdp_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
+ .device_run = mtk_mdp_m2m_device_run,
+ .job_abort = mtk_mdp_m2m_job_abort,
+};
+
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int ret;
+
+ mdp->variant = &mtk_mdp_default_variant;
+ mdp->vdev = video_device_alloc();
+ if (!mdp->vdev) {
+ dev_err(dev, "failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_video_alloc;
+ }
+ mdp->vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ mdp->vdev->fops = &mtk_mdp_m2m_fops;
+ mdp->vdev->ioctl_ops = &mtk_mdp_m2m_ioctl_ops;
+ mdp->vdev->release = video_device_release;
+ mdp->vdev->lock = &mdp->lock;
+ mdp->vdev->vfl_dir = VFL_DIR_M2M;
+ mdp->vdev->v4l2_dev = &mdp->v4l2_dev;
+ snprintf(mdp->vdev->name, sizeof(mdp->vdev->name), "%s:m2m",
+ MTK_MDP_MODULE_NAME);
+ video_set_drvdata(mdp->vdev, mdp);
+
+ mdp->m2m_dev = v4l2_m2m_init(&mtk_mdp_m2m_ops);
+ if (IS_ERR(mdp->m2m_dev)) {
+ dev_err(dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(mdp->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2);
+ if (ret) {
+ dev_err(dev, "failed to register video device\n");
+ goto err_vdev_register;
+ }
+
+ v4l2_info(&mdp->v4l2_dev, "driver registered as /dev/video%d",
+ mdp->vdev->num);
+ return 0;
+
+err_vdev_register:
+ v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+ video_device_release(mdp->vdev);
+err_video_alloc:
+
+ return ret;
+}
+
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp)
+{
+ video_unregister_device(mdp->vdev);
+ v4l2_m2m_release(mdp->m2m_dev);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
new file mode 100644
index 000000000000..45afd3655817
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_M2M_H__
+#define __MTK_MDP_M2M_H__
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state);
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp);
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp);
+
+#endif /* __MTK_MDP_M2M_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
new file mode 100644
index 000000000000..86d57f380c97
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_regs.h"
+
+
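+/*
+ * Pack the attributes of a colour format (video mode, plane and co-plane
+ * counts, H/V factors, bit depth, group, swap flag and a format index)
+ * into the single value stored in mdp_config.format for the VPU.
+ */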
+#define MDP_COLORFMT_PACK(VIDEO, PLANE, COPLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+ (((VIDEO) << 27) | ((PLANE) << 24) | ((COPLANE) << 22) |\
+ ((HF) << 20) | ((VF) << 18) | ((BITS) << 8) | ((GROUP) << 6) |\
+ ((SWAP) << 5) | ((ID) << 0))
+
+enum MDP_COLOR_ENUM {
+ MDP_COLOR_UNKNOWN = 0,
+ MDP_COLOR_NV12 = MDP_COLORFMT_PACK(0, 2, 1, 1, 1, 8, 1, 0, 12),
+ MDP_COLOR_I420 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 0, 8),
+ MDP_COLOR_YV12 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 1, 8),
+ /* Mediatek proprietary format */
+ MDP_COLOR_420_MT21 = MDP_COLORFMT_PACK(5, 2, 1, 1, 1, 256, 1, 0, 12),
+};
+
+static int32_t mtk_mdp_map_color_format(int v4l2_format)
+{
+ switch (v4l2_format) {
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV12:
+ return MDP_COLOR_NV12;
+ case V4L2_PIX_FMT_MT21C:
+ return MDP_COLOR_420_MT21;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YUV420:
+ return MDP_COLOR_I420;
+ case V4L2_PIX_FMT_YVU420:
+ return MDP_COLOR_YV12;
+ }
+
+ mtk_mdp_err("Unknown format 0x%x", v4l2_format);
+
+ return MDP_COLOR_UNKNOWN;
+}
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ src_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr)
+{
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+ dst_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+
+ /* Set input pixel offset */
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+
+ /* Set input cropped size */
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+
+ /* Set input original size */
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->s_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->src_config;
+ struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+
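+ /*
+ * plane_num counts the logical colour planes (num_comp), which may be
+ * larger than the number of memory planes for single-buffer formats
+ * such as YV12.
+ */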
+ src_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it from the color format. */
+ config->h_stride = 0; /* MDP will calculate it from the color format. */
+
+ for (i = 0; i < src_buf->plane_num; i++)
+ src_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx)
+{
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+
+ config->crop_x = frame->crop.left;
+ config->crop_y = frame->crop.top;
+ config->crop_w = frame->crop.width;
+ config->crop_h = frame->crop.height;
+ config->x = 0;
+ config->y = 0;
+ config->w = frame->width;
+ config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx)
+{
+ unsigned int i;
+ struct mtk_mdp_frame *frame = &ctx->d_frame;
+ struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+ struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+
+ dst_buf->plane_num = frame->fmt->num_comp;
+ config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+ config->w_stride = 0; /* MDP will calculate it from the color format. */
+ config->h_stride = 0; /* MDP will calculate it from the color format. */
+ for (i = 0; i < dst_buf->plane_num; i++)
+ dst_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->orientation = ctx->ctrls.rotate->val;
+ misc->hflip = ctx->ctrls.hflip->val;
+ misc->vflip = ctx->ctrls.vflip->val;
+}
+
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx)
+{
+ struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+ misc->alpha = ctx->ctrls.global_alpha->val;
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
new file mode 100644
index 000000000000..42bd057e76cc
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_REGS_H__
+#define __MTK_MDP_REGS_H__
+
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+ struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx);
+
+
+#endif /* __MTK_MDP_REGS_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
new file mode 100644
index 000000000000..4893825aa5dd
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_vpu.h"
+#include "mtk_vpu.h"
+
+
+static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu)
+{
+ return container_of(vpu, struct mtk_mdp_ctx, vpu);
+}
+
+static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
+{
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+
+ /* mapping VPU address to kernel virtual address */
+ vpu->vsi = (struct mdp_process_vsi *)
+ vpu_mapping_dm_addr(vpu->pdev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+}
+
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ unsigned int msg_id = *(unsigned int *)data;
+ struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data;
+ struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+ (unsigned long)msg->ap_inst;
+ struct mtk_mdp_ctx *ctx;
+
+ vpu->failure = msg->status;
+ if (!vpu->failure) {
+ switch (msg_id) {
+ case VPU_MDP_INIT_ACK:
+ mtk_mdp_vpu_handle_init_ack(data);
+ break;
+ case VPU_MDP_DEINIT_ACK:
+ case VPU_MDP_PROCESS_ACK:
+ break;
+ default:
+ ctx = vpu_to_ctx(vpu);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "handle unknown ipi msg:0x%x\n",
+ msg_id);
+ break;
+ }
+ } else {
+ ctx = vpu_to_ctx(vpu);
+ mtk_mdp_dbg(0, "[%d]:msg 0x%x, failure:%d", ctx->id,
+ msg_id, vpu->failure);
+ }
+}
+
+int mtk_mdp_vpu_register(struct platform_device *pdev)
+{
+ struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+ int err;
+
+ err = vpu_ipi_register(mdp->vpu_dev, IPI_MDP,
+ mtk_mdp_vpu_ipi_handler, "mdp_vpu", NULL);
+ if (err)
+ dev_err(&mdp->pdev->dev,
+ "vpu_ipi_registration fail status=%d\n", err);
+
+ return err;
+}
+
+static int mtk_mdp_vpu_send_msg(void *msg, int len, struct mtk_mdp_vpu *vpu,
+ int id)
+{
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+ int err;
+
+ if (!vpu->pdev) {
+ mtk_mdp_dbg(1, "[%d]:vpu pdev is NULL", ctx->id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->mdp_dev->vpulock);
+ err = vpu_ipi_send(vpu->pdev, (enum ipi_id)id, msg, len);
+ if (err)
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "vpu_ipi_send fail status %d\n", err);
+ mutex_unlock(&ctx->mdp_dev->vpulock);
+
+ return err;
+}
+
+static int mtk_mdp_vpu_send_ap_ipi(struct mtk_mdp_vpu *vpu, uint32_t msg_id)
+{
+ int err;
+ struct mdp_ipi_comm msg;
+
+ msg.msg_id = msg_id;
+ msg.ipi_id = IPI_MDP;
+ msg.vpu_inst_addr = vpu->inst_addr;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu)
+{
+ int err;
+ struct mdp_ipi_init msg;
+ struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+
+ vpu->pdev = ctx->mdp_dev->vpu_dev;
+
+ msg.msg_id = AP_MDP_INIT;
+ msg.ipi_id = IPI_MDP;
+ msg.ap_inst = (unsigned long)vpu;
+ err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+ if (!err && vpu->failure)
+ err = -EINVAL;
+
+ return err;
+}
+
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_DEINIT);
+}
+
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu)
+{
+ return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_PROCESS);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
new file mode 100644
index 000000000000..df4bddaa438e
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ * Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_VPU_H__
+#define __MTK_MDP_VPU_H__
+
+#include "mtk_mdp_ipi.h"
+
+
+/**
+ * struct mtk_mdp_vpu - VPU instance for MDP
+ * @pdev : pointer to the VPU platform device
+ * @inst_addr : VPU MDP instance address
+ * @failure : VPU execution result status
+ * @vsi : VPU shared information
+ */
+struct mtk_mdp_vpu {
+ struct platform_device *pdev;
+ uint32_t inst_addr;
+ int32_t failure;
+ struct mdp_process_vsi *vsi;
+};
+
+int mtk_mdp_vpu_register(struct platform_device *pdev);
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu);
+
+#endif /* __MTK_MDP_VPU_H__ */
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index dc5cb006d600..852d9697ccfa 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -1,7 +1,16 @@
-
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-enc.o mtk-vcodec-common.o
-
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
+ mtk-vcodec-enc.o \
+ mtk-vcodec-common.o
+
+mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
+ vdec/vdec_vp8_if.o \
+ vdec/vdec_vp9_if.o \
+ mtk_vcodec_dec_drv.o \
+ vdec_drv_if.o \
+ vdec_vpu_if.o \
+ mtk_vcodec_dec.o \
+ mtk_vcodec_dec_pm.o \
+
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
new file mode 100644
index 000000000000..074659227864
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec_pm.h"
+
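+/*
+ * Indexes into mtk_video_formats[]: the default OUTPUT (bitstream) format
+ * and the default CAPTURE (decoded frame) format.
+ */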
+#define OUT_FMT_IDX 0
+#define CAP_FMT_IDX 3
+
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
+#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
+#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[MTK_Q_DATA_SRC];
+
+ return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+/*
+ * This function tries to clean all display buffers; the buffers are
+ * returned in display order.
+ * Note that buffers returned by the codec driver may still be on the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
+ ctx->id);
+ return NULL;
+ }
+
+ if (disp_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
+ ctx->picinfo.y_bs_sz);
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+ ctx->picinfo.c_bs_sz);
+
+ dstbuf->ready_to_display = true;
+
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that are no longer used
+ * as reference buffers by the codec driver.
+ * In that case we re-queue the buffer to the vb2 queue if user space has
+ * already returned it to v4l2, or if the buffer is just the output of a
+ * previous sps/pps/resolution-change decode; we do nothing if user space
+ * still owns the buffer.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (free_frame_buffer == NULL) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
+ ctx->id, free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if ((dstbuf->queued_in_vb2) &&
+ (dstbuf->queued_in_v4l2) &&
+ (free_frame_buffer->status == FB_ST_FREE)) {
+ /*
+ * After decoding sps/pps or a non-display buffer, we don't
+ * need to return the capture buffer to user space; just
+ * re-queue it to the vb2 queue. This avoids the overhead of
+ * dequeuing and re-queuing an unused capture buffer. In this
+ * case, queued_in_vb2 = true.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ } else if ((dstbuf->queued_in_vb2 == false) &&
+ (dstbuf->queued_in_v4l2 == true)) {
+ /*
+ * The buffer is in the v4l2 driver but not yet in the vb2
+ * queue, and we got it from the free list, which means the
+ * codec driver no longer uses it as a reference buffer. Queue
+ * it to the vb2 queue so the work thread can later pick it up
+ * for decoding. Here queued_in_vb2 = false means this buffer
+ * is not from a previous decode output.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+ /*
+ * The codec driver no longer references this capture buffer
+ * and it is not in the v4l2 driver either, so there is
+ * nothing to do; just log it when debugging the buffer flow.
+ * When user space queues this buffer again, it can go
+ * directly to the vb2 queue.
+ */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ dstbuf->vb.vb2_buf.index,
+ dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &dstbuf->vb.vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_display_buffer(ctx);
+ } while (framptr);
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_buffer *framptr;
+
+ do {
+ framptr = get_free_buffer(ctx);
+ } while (framptr);
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret = 0;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+}
+
+static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx,
+ GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return;
+ }
+
+ if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
+ return;
+
+ mtk_v4l2_debug(1,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (dst_buf == NULL) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
+ src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+ dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+ buf.va = vb2_plane_vaddr(src_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ buf.size = (size_t)src_buf->planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+ ctx->id, src_buf->index);
+ return;
+ }
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
+
+ pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
+ pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->index, pfb,
+ pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size);
+
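+ /*
+ * The OUTPUT buffer was flagged as the last frame (EOS): flush the
+ * decoder with a NULL bitstream and return both buffers as done.
+ */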
+ if (src_buf_info->lastframe) {
+ /* update src buf status */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ src_buf_info->lastframe = false;
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_buf_info->used = false;
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+ dst_buf_info->vb.vb2_buf.timestamp
+ = src_buf_info->vb.vb2_buf.timestamp;
+ dst_buf_info->vb.timecode
+ = src_buf_info->vb.timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(
+ " <===[%d], src_buf[%d]%d sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id,
+ src_buf->index,
+ src_buf_info->lastframe,
+ buf.size,
+ src_buf_info->vb.vb2_buf.timestamp,
+ dst_buf->index,
+ ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+ } else if (res_chg == false) {
+ /*
+ * we only return src buffer with VB2_BUF_STATE_DONE
+ * when decode success without resolution change
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+ * A resolution change was encountered in the stream. The
+ * driver must first process and decode all remaining buffers
+ * from before the resolution change point, so flush the
+ * decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+ * Once all buffers containing decoded frames from before the
+ * resolution change point are ready to be dequeued on the
+ * CAPTURE queue, the driver sends a V4L2_EVENT_SOURCE_CHANGE
+ * event with source change type V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_unlock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
+{
+ mutex_lock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
+{
+ vdec_if_deinit(ctx);
+ ctx->state = MTK_STATE_FREE;
+}
+
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+
+ ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
+ q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+ q_data->bytesperline[0] = 0;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+
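+ /*
+ * v4l_bound_align_image() takes log2 alignments: align the coded
+ * width to 16 (1 << 4) and the coded height to 32 (1 << 5).
+ */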
+ v4l_bound_align_image(&q_data->coded_width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 4,
+ &q_data->coded_height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 5, 6);
+
+ q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
+ q_data->bytesperline[0] = q_data->coded_width;
+ q_data->sizeimage[1] = q_data->sizeimage[0] / 2;
+ q_data->bytesperline[1] = q_data->coded_width;
+}
+
+static int vidioc_vdec_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct mtk_video_dec_buf *mtkbuf;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type);
+ if (buf->index >= vq->num_buffers) {
+ mtk_v4l2_debug(1, "buffer index %d out of range", buf->index);
+ return -EINVAL;
+ }
+ vb = vq->bufs[buf->index];
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ mtkbuf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+
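+ /*
+ * An OUTPUT buffer queued with zero bytesused marks the end of the
+ * stream; flag it so the decode worker flushes the decoder.
+ */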
+ if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ (buf->m.planes[0].bytesused == 0)) {
+ mtkbuf->lastframe = true;
+ mtk_v4l2_debug(1, "[%d] (%d) id=%d lastframe=%d (%d,%d, %d) vb=%p",
+ ctx->id, buf->type, buf->index,
+ mtkbuf->lastframe, buf->bytesused,
+ buf->m.planes[0].bytesused, buf->length,
+ vb);
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MTK_VCODEC_DEC_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+ strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int i;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pix_fmt_mp->num_planes = 1;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ int tmp_w, tmp_h;
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W);
+
+ /*
+ * Find the next larger rectangle with width, height and size
+ * aligned to 64.
+ * Note: this only provides a default value; the value the HW
+ * really needs is only available once the ctx reaches the
+ * MTK_STATE_HEADER state.
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VDEC_MIN_W,
+ MTK_VDEC_MAX_W, 6,
+ &pix_fmt_mp->height,
+ MTK_VDEC_MIN_H,
+ MTK_VDEC_MAX_H, 6, 9);
+
+ if (pix_fmt_mp->width < tmp_w &&
+ (pix_fmt_mp->width + 64) <= MTK_VDEC_MAX_W)
+ pix_fmt_mp->width += 64;
+ if (pix_fmt_mp->height < tmp_h &&
+ (pix_fmt_mp->height + 64) <= MTK_VDEC_MAX_H)
+ pix_fmt_mp->height += 64;
+
+ mtk_v4l2_debug(0,
+ "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->width * pix_fmt_mp->height);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height;
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ }
+ }
+
+ for (i = 0; i < pix_fmt_mp->num_planes; i++)
+ memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+ pix_fmt_mp->flags = 0;
+ memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_vdec_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+
+ if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+ mtk_v4l2_err("sizeimage of output format must be given");
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_vdec_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.buf_w;
+ s->r.height = ctx->picinfo.buf_h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (vdec_if_get_param(ctx, GET_PARAM_CROP_INFO, &(s->r))) {
+ /* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ctx->state < MTK_STATE_HEADER) {
+ /* set to default value if header info is not ready yet */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->picinfo.pic_w;
+ s->r.height = ctx->picinfo.pic_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_vdec_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct mtk_q_data *q_data;
+ int ret = 0;
+ struct mtk_video_fmt *fmt;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix_mp = &f->fmt.pix_mp;
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
+ mtk_v4l2_err("out_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
+ mtk_v4l2_err("cap_q_ctx buffers already requested");
+ ret = -EBUSY;
+ }
+
+ fmt = mtk_vdec_find_format(f);
+ if (fmt == NULL) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ f->fmt.pix.pixelformat =
+ mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_vdec_find_format(f);
+ }
+ }
+
+ q_data->fmt = fmt;
+ vidioc_try_fmt(f, q_data->fmt);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage;
+ q_data->coded_width = pix_mp->width;
+ q_data->coded_height = pix_mp->height;
+
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quantization = f->fmt.pix_mp.quantization;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ if (ctx->state == MTK_STATE_FREE) {
+ ret = vdec_if_init(ctx, q_data->fmt->fourcc);
+ if (ret) {
+ mtk_v4l2_err("[%d]: vdec_if_init() fail ret=%d",
+ ctx->id, ret);
+ return -EINVAL;
+ }
+ ctx->state = MTK_STATE_INIT;
+ }
+ }
+
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int i = 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+ if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ if (!(ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ mtk_v4l2_debug(3, "4K is enabled");
+ fsize->stepwise.max_width =
+ VCODEC_DEC_4K_CODED_WIDTH;
+ fsize->stepwise.max_height =
+ VCODEC_DEC_4K_CODED_HEIGHT;
+ }
+ mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
+ ctx->dev->dec_capability,
+ fsize->stepwise.min_width,
+ fsize->stepwise.max_width,
+ fsize->stepwise.step_width,
+ fsize->stepwise.min_height,
+ fsize->stepwise.max_height,
+ fsize->stepwise.step_height);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+ struct mtk_video_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ continue;
+ if (!output_queue &&
+ (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+
+ if (i == NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &mtk_video_formats[i];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_vdec_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("no vb2 queue for type=%d", f->type);
+ return -EINVAL;
+ }
+
+ q_data = mtk_vdec_get_q_data(ctx, f->type);
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ pix_mp->xfer_func = ctx->xfer_func;
+
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ (ctx->state >= MTK_STATE_HEADER)) {
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates as if
+ * the resolution hasn't changed yet.
+ * So just return the current picinfo here; picinfo is
+ * updated in the stop_streaming hook.
+ */
+ q_data->sizeimage[0] = ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ q_data->sizeimage[1] = ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ q_data->bytesperline[0] = ctx->last_decoded_picinfo.buf_w;
+ q_data->bytesperline[1] = ctx->last_decoded_picinfo.buf_w;
+ q_data->coded_width = ctx->picinfo.buf_w;
+ q_data->coded_height = ctx->picinfo.buf_h;
+
+ /*
+ * Width and height are set to the dimensions
+ * of the movie, the buffer is bigger and
+ * further processing stages should crop to this
+ * rectangle.
+ */
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+
+ /*
+ * Set pixelformat to the format in which mt vcodec
+ * outputs the decoded frame
+ */
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /*
+ * This runs on the OUTPUT queue. The buffer contains a
+ * compressed image, so width and height have no real
+ * meaning here; assign values to pass the v4l2-compliance
+ * test.
+ */
+ pix_mp->width = q_data->visible_width;
+ pix_mp->height = q_data->visible_height;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ } else {
+ pix_mp->width = q_data->coded_width;
+ pix_mp->height = q_data->coded_height;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+ pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+ pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+ mtk_v4l2_debug(1, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
+ ctx->id, f->type, ctx->state);
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_q_data *q_data;
+ unsigned int i;
+
+ q_data = mtk_vdec_get_q_data(ctx, vq->type);
+
+ if (q_data == NULL) {
+ mtk_v4l2_err("vq->type=%d err\n", vq->type);
+ return -EINVAL;
+ }
+
+ if (*nplanes) {
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ }
+ } else {
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ *nplanes = 2;
+ else
+ *nplanes = 1;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+ }
+
+ mtk_v4l2_debug(1,
+ "[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
+ ctx->id, vq->type, *nplanes, *nbuffers,
+ sizes[0], sizes[1]);
+
+ return 0;
+}
+
+static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_q_data *q_data;
+ int i;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d",
+ ctx->id, vb->vb2_queue->type, vb->index);
+
+ q_data = mtk_vdec_get_q_data(ctx, vb->vb2_queue->type);
+
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i),
+ q_data->sizeimage[i]);
+ }
+ }
+
+ return 0;
+}
+
+static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret = 0;
+ unsigned int dpbsize = 1;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+ struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+ struct mtk_video_dec_buf, vb);
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
+ ctx->id, vb->vb2_queue->type,
+ vb->index, vb);
+ /*
+ * check if this buffer is ready to be used after decode
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mutex_lock(&ctx->lock);
+ if (buf->used == false) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ to_vb2_v4l2_buffer(vb));
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ buf->ready_to_display = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d",
+ ctx->id, ctx->state);
+ return;
+ }
+
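+ /*
+ * ctx is still in MTK_STATE_INIT: feed this first bitstream buffer
+ * to the decoder to parse the header and retrieve the picture info.
+ */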
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(src_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src_mem.size = (size_t)src_buf->planes[0].bytesused;
+ mtk_v4l2_debug(2,
+ "[%d] buf id=%d va=%p dma=%pad size=%zx",
+ ctx->id, src_buf->index,
+ src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+ * fb == NULL means we only parse the SPS/PPS header or
+ * resolution info in src_mem. The decode can fail if
+ * there is no SPS header or picture info in the
+ * bitstream.
+ */
+ int log_level = ret ? 0 : 1;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ mtk_v4l2_debug(log_level,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->index,
+ src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.y_bs_sz +
+ ctx->picinfo.y_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.c_bs_sz +
+ ctx->picinfo.c_len_sz;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] = ctx->picinfo.buf_w;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+}
+
+static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_video_dec_buf *buf;
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return;
+
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ mutex_lock(&ctx->lock);
+ buf->queued_in_v4l2 = false;
+ buf->queued_in_vb2 = false;
+ mutex_unlock(&ctx->lock);
+}
+
+static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+ struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+ struct mtk_video_dec_buf, vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ buf->used = false;
+ buf->ready_to_display = false;
+ buf->queued_in_v4l2 = false;
+ } else {
+ buf->lastframe = false;
+ }
+
+ return 0;
+}
+
+static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (ctx->state == MTK_STATE_FLUSH)
+ ctx->state = MTK_STATE_HEADER;
+
+ return 0;
+}
+
+static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct vb2_buffer *src_buf = NULL, *dst_buf = NULL;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
+ ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ if (ctx->state >= MTK_STATE_HEADER) {
+
+ /* Until STREAMOFF is called on the CAPTURE queue
+ * (acknowledging the event), the driver operates
+ * as if the resolution hasn't changed yet, i.e.
+ * VIDIOC_G_FMT etc. return the previous resolution.
+ * So update picinfo here.
+ */
+ ctx->picinfo = ctx->last_decoded_picinfo;
+
+ mtk_v4l2_debug(2,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ mtk_vdec_flush_decoder(ctx);
+ }
+ ctx->state = MTK_STATE_FLUSH;
+
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ vb2_set_plane_payload(dst_buf, 0, 0);
+ vb2_set_plane_payload(dst_buf, 1, 0);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+
+}
+
+static void m2mops_vdec_device_run(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ queue_work(dev->decode_workqueue, &ctx->decode_work);
+}
+
+static int m2mops_vdec_job_ready(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
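+ /*
+ * The job is ready only in MTK_STATE_HEADER and when no resolution
+ * change is pending (last decoded picinfo matches current picinfo).
+ */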
+ if (ctx->state == MTK_STATE_ABORT)
+ return 0;
+
+ if ((ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w) ||
+ (ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h))
+ return 0;
+
+ if (ctx->state != MTK_STATE_HEADER)
+ return 0;
+
+ return 1;
+}
+
+static void m2mops_vdec_job_abort(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ ctx->state = MTK_STATE_ABORT;
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
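+ /*
+ * Volatile control: reads report ctx->dpb_size once the header has
+ * been parsed (see mtk_vdec_g_v_ctrl()).
+ */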
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d",
+ ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
+
+static void m2mops_vdec_lock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mutex_lock(&ctx->dev->dev_mutex);
+}
+
+static void m2mops_vdec_unlock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mutex_unlock(&ctx->dev->dev_mutex);
+}
+
+const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
+ .device_run = m2mops_vdec_device_run,
+ .job_ready = m2mops_vdec_job_ready,
+ .job_abort = m2mops_vdec_job_abort,
+ .lock = m2mops_vdec_lock,
+ .unlock = m2mops_vdec_unlock,
+};
+
+static const struct vb2_ops mtk_vdec_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_queue = vb2ops_vdec_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_qbuf = vidioc_vdec_qbuf,
+ .vidioc_dqbuf = vidioc_vdec_dqbuf,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_vdec_g_fmt,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_querycap = vidioc_vdec_querycap,
+ .vidioc_subscribe_event = vidioc_vdec_subscribe_evt,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = vidioc_vdec_g_selection,
+ .vidioc_s_selection = vidioc_vdec_s_selection,
+};
+
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret = 0;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(output)");
+ return ret;
+ }
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+ dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
+ }
+
+ return ret;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
new file mode 100644
index 000000000000..362f5a85762e
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_H_
+#define _MTK_VCODEC_DEC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define VCODEC_CAPABILITY_4K_DISABLED 0x10
+#define VCODEC_DEC_4K_CODED_WIDTH 4096U
+#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
+#define MTK_VDEC_MAX_W 2048U
+#define MTK_VDEC_MAX_H 1088U
+
+#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
+
+/**
+ * struct vdec_fb - decoder frame buffer
+ * @base_y : Y plane memory info
+ * @base_c : C plane memory info
+ * @status : frame buffer status (vdec_fb_status)
+ */
+struct vdec_fb {
+ struct mtk_vcodec_mem base_y;
+ struct mtk_vcodec_mem base_c;
+ unsigned int status;
+};
+
+/**
+ * struct mtk_video_dec_buf - Private data related to each VB2 buffer.
+ * @vb: VB2 buffer
+ * @list: link list
+ * @used: Capture buffer contains decoded frame data and is kept in the
+ * codec data structure
+ * @ready_to_display: Capture buffer is not displayed yet
+ * @queued_in_vb2: Capture buffer is queued in vb2
+ * @queued_in_v4l2: Capture buffer is in the v4l2 driver, but not in the
+ * vb2 queue yet
+ * @lastframe: Input buffer is the last buffer - EOS
+ * @frame_buffer: Decode status and buffer information of the capture buffer
+ *
+ * Note: these status fields help us track and debug buffer state.
+ */
+struct mtk_video_dec_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+
+ bool used;
+ bool ready_to_display;
+ bool queued_in_vb2;
+ bool queued_in_v4l2;
+ bool lastframe;
+ struct vdec_fb frame_buffer;
+};
+
+extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+
+
+/*
+ * mtk_vdec_lock/mtk_vdec_unlock are for the ctx instance to
+ * get/release the lock before/after accessing the decoder hw.
+ * mtk_vdec_lock gets the decoder hw lock and sets curr_ctx
+ * to the ctx instance that holds the lock.
+ */
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx);
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+
+#endif /* _MTK_VCODEC_DEC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
new file mode 100644
index 000000000000..d48287c727f4
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+#define VDEC_HW_ACTIVE 0x10
+#define VDEC_IRQ_CFG 0x11
+#define VDEC_IRQ_CLR 0x10
+#define VDEC_IRQ_CFG_REG 0xa4
+
+module_param(mtk_v4l2_dbg_level, int, 0644);
+module_param(mtk_vcodec_dbg, bool, 0644);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
+{
+ ctx->int_cond = 1;
+ wake_up_interruptible(&ctx->queue);
+}
+
+static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ u32 cg_status = 0;
+ unsigned int dec_done_status = 0;
+ void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
+ VDEC_IRQ_CFG_REG;
+
+ ctx = mtk_vcodec_get_curr_ctx(dev);
+
+ /* check if HW active or not */
+ cg_status = readl(dev->reg_base[0]);
+ if ((cg_status & VDEC_HW_ACTIVE) != 0) {
+ mtk_v4l2_err("DEC ISR, VDEC active is not 0x0 (0x%08x)",
+ cg_status);
+ return IRQ_HANDLED;
+ }
+
+ dec_done_status = readl(vdec_misc_addr);
+ ctx->irq_status = dec_done_status;
+ if ((dec_done_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS) !=
+ MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return IRQ_HANDLED;
+
+ /* clear interrupt */
+ writel((readl(vdec_misc_addr) | VDEC_IRQ_CFG),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+ writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
+ dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+
+ wake_up_ctx(ctx);
+
+ mtk_v4l2_debug(3,
+ "mtk_vcodec_dec_irq_handler :wake up ctx %d, dec_done_status=%x",
+ ctx->id, dec_done_status);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_vcodec_dec_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ERROR",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int fops_vcodec_open(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_lock(&dev->dev_mutex);
+ ctx->id = dev->id_counter++;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+ ctx->dev = dev;
+ init_waitqueue_head(&ctx->queue);
+ mutex_init(&ctx->lock);
+
+ ctx->type = MTK_INST_DECODER;
+ ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ if (ret) {
+ mtk_v4l2_err("Failed to setup mt vcodec controls");
+ goto err_ctrls_setup;
+ }
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+ &mtk_vcodec_dec_queue_init);
+ if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+ ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+ mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+ ret);
+ goto err_m2m_ctx_init;
+ }
+ mtk_vcodec_dec_set_default_params(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh)) {
+ mtk_vcodec_dec_pw_on(&dev->pm);
+ /*
+ * vpu_load_firmware checks if it was loaded already and
+ * does nothing in that case
+ */
+ ret = vpu_load_firmware(dev->vpu_plat_dev);
+ if (ret < 0) {
+ /*
+ * vpu_load_firmware() returns 0 on success; anything
+ * else means the firmware download failed.
+ */
+ mtk_v4l2_err("vpu_load_firmware failed!");
+ goto err_load_fw;
+ }
+
+ dev->dec_capability =
+ vpu_get_vdec_hw_capa(dev->vpu_plat_dev);
+ mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
+ }
+
+ list_add(&ctx->list, &dev->ctx_list);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
+ return ret;
+
+ /* Deinit when failure occurred */
+err_load_fw:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mtk_v4l2_debug(0, "[%d] decoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+
+ /*
+ * Call v4l2_m2m_ctx_release before mtk_vcodec_dec_release. First, it
+ * makes sure the worker thread is not running after vdec_if_deinit.
+ * Second, the decoder will be flushed and all the buffers will be
+ * returned in stop_streaming.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mtk_vcodec_dec_release(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh))
+ mtk_vcodec_dec_pw_off(&dev->pm);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ list_del_init(&ctx->list);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_vcodec_open,
+ .release = fops_vcodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev;
+ struct video_device *vfd_dec;
+ struct resource *res;
+ int i, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev->ctx_list);
+ dev->plat_dev = pdev;
+
+ dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+ if (dev->vpu_plat_dev == NULL) {
+ mtk_v4l2_err("[VPU] vpu device in not ready");
+ return -EPROBE_DEFER;
+ }
+
+ vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_dec_reset_handler,
+ dev, VPU_RST_DEC);
+
+ ret = mtk_vcodec_init_dec_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
+ return ret;
+ }
+
+ for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "get memory resource failed.");
+ ret = -ENXIO;
+ goto err_res;
+ }
+ dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[i])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->dec_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+ mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
+ dev->dec_irq,
+ ret);
+ goto err_res;
+ }
+
+ disable_irq(dev->dec_irq);
+ mutex_init(&dev->dec_mutex);
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+ "[/MTK_V4L2_VDEC]");
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ goto err_res;
+ }
+
+ init_waitqueue_head(&dev->queue);
+
+ vfd_dec = video_device_alloc();
+ if (!vfd_dec) {
+ mtk_v4l2_err("Failed to allocate video device");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+ vfd_dec->fops = &mtk_vcodec_fops;
+ vfd_dec->ioctl_ops = &mtk_vdec_ioctl_ops;
+ vfd_dec->release = video_device_release;
+ vfd_dec->lock = &dev->dev_mutex;
+ vfd_dec->v4l2_dev = &dev->v4l2_dev;
+ vfd_dec->vfl_dir = VFL_DIR_M2M;
+ vfd_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+
+ snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s",
+ MTK_VCODEC_DEC_NAME);
+ video_set_drvdata(vfd_dec, dev);
+ dev->vfd_dec = vfd_dec;
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev_dec = v4l2_m2m_init(&mtk_vdec_m2m_ops);
+ if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
+ mtk_v4l2_err("Failed to init mem2mem dec device");
+ ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
+ goto err_dec_mem_init;
+ }
+
+ dev->decode_workqueue =
+ alloc_ordered_workqueue(MTK_VCODEC_DEC_NAME,
+ WQ_MEM_RECLAIM | WQ_FREEZABLE);
+ if (!dev->decode_workqueue) {
+ mtk_v4l2_err("Failed to create decode workqueue");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_dec_reg;
+ }
+
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
+ vfd_dec->num);
+
+ return 0;
+
+err_dec_reg:
+ destroy_workqueue(dev->decode_workqueue);
+err_event_workq:
+ v4l2_m2m_release(dev->m2m_dev_dec);
+err_dec_mem_init:
+ video_unregister_device(vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+ mtk_vcodec_release_dec_pm(dev);
+ return ret;
+}
+
+static const struct of_device_id mtk_vcodec_match[] = {
+ {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_vcodec_match);
+
+static int mtk_vcodec_dec_remove(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+ flush_workqueue(dev->decode_workqueue);
+ destroy_workqueue(dev->decode_workqueue);
+ if (dev->m2m_dev_dec)
+ v4l2_m2m_release(dev->m2m_dev_dec);
+
+ if (dev->vfd_dec)
+ video_unregister_device(dev->vfd_dec);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ mtk_vcodec_release_dec_pm(dev);
+ return 0;
+}
+
+static struct platform_driver mtk_vcodec_dec_driver = {
+ .probe = mtk_vcodec_probe,
+ .remove = mtk_vcodec_dec_remove,
+ .driver = {
+ .name = MTK_VCODEC_DEC_NAME,
+ .of_match_table = mtk_vcodec_match,
+ },
+};
+
+module_platform_driver(mtk_vcodec_dec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 decoder driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
new file mode 100644
index 000000000000..79ca03ac449c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+ struct mtk_vcodec_pm *pm;
+ int ret = 0;
+
+ pdev = mtkdev->plat_dev;
+ pm = &mtkdev->pm;
+ pm->mtkdev = mtkdev;
+ node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
+ return -1;
+ }
+
+ pdev = of_find_device_by_node(node);
+ if (WARN_ON(!pdev)) {
+ of_node_put(node);
+ return -1;
+ }
+ pm->larbvdec = &pdev->dev;
+ pdev = mtkdev->plat_dev;
+ pm->dev = &pdev->dev;
+
+ pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
+ if (IS_ERR(pm->vcodecpll)) {
+ mtk_v4l2_err("devm_clk_get vcodecpll fail");
+ ret = PTR_ERR(pm->vcodecpll);
+ }
+
+ pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
+ if (IS_ERR(pm->univpll_d2)) {
+ mtk_v4l2_err("devm_clk_get univpll_d2 fail");
+ ret = PTR_ERR(pm->univpll_d2);
+ }
+
+ pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
+ if (IS_ERR(pm->clk_cci400_sel)) {
+ mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
+ ret = PTR_ERR(pm->clk_cci400_sel);
+ }
+
+ pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
+ if (IS_ERR(pm->vdec_sel)) {
+ mtk_v4l2_err("devm_clk_get vdec_sel fail");
+ ret = PTR_ERR(pm->vdec_sel);
+ }
+
+ pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
+ if (IS_ERR(pm->vdecpll)) {
+ mtk_v4l2_err("devm_clk_get vdecpll fail");
+ ret = PTR_ERR(pm->vdecpll);
+ }
+
+ pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
+ if (IS_ERR(pm->vencpll)) {
+ mtk_v4l2_err("devm_clk_get vencpll fail");
+ ret = PTR_ERR(pm->vencpll);
+ }
+
+ pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+ if (IS_ERR(pm->venc_lt_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+ ret = PTR_ERR(pm->venc_lt_sel);
+ }
+
+ pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
+ if (IS_ERR(pm->vdec_bus_clk_src)) {
+ mtk_v4l2_err("devm_clk_get vdec_bus_clk_src");
+ ret = PTR_ERR(pm->vdec_bus_clk_src);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ return ret;
+}
+
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
+{
+ pm_runtime_disable(dev->pm.dev);
+}
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = pm_runtime_put_sync(pm->dev);
+ if (ret)
+ mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
+
+ ret = clk_set_rate(pm->vencpll, 800 * 1000000);
+ if (ret)
+ mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vcodecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vencpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->venc_lt_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
+
+ ret = clk_prepare_enable(pm->clk_cci400_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
+ ret);
+
+ ret = clk_prepare_enable(pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
+
+ ret = clk_prepare_enable(pm->vdec_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
+
+ ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvdec);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+
+}
+
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
+{
+ mtk_smi_larb_put(pm->larbvdec);
+ clk_disable_unprepare(pm->vdec_sel);
+ clk_disable_unprepare(pm->vdecpll);
+ clk_disable_unprepare(pm->univpll_d2);
+ clk_disable_unprepare(pm->clk_cci400_sel);
+ clk_disable_unprepare(pm->venc_lt_sel);
+ clk_disable_unprepare(pm->vdec_bus_clk_src);
+ clk_disable_unprepare(pm->vencpll);
+ clk_disable_unprepare(pm->vcodecpll);
+}
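
mtk_vcodec_dec_clock_on()/..._clock_off() above follow the usual pairing discipline: every clk_prepare_enable() has a matching clk_disable_unprepare(), and the disables run in roughly the reverse order of the enables. A stack is the simplest mental model for that pairing; the toy below is plain userspace C, and the clock names are just string labels, not real clk handles:

#include <stdio.h>

#define NCLK 8

int main(void)
{
	/* Labels only; in the driver these are struct clk pointers. */
	static const char *enable_order[NCLK] = {
		"vcodecpll", "vencpll", "vdec_bus_clk_src", "venc_lt_sel",
		"univpll_d2", "clk_cci400_sel", "vdecpll", "vdec_sel",
	};
	const char *stack[NCLK];
	int top = 0, i;

	for (i = 0; i < NCLK; i++) {
		stack[top++] = enable_order[i];		/* clk_prepare_enable() */
		printf("enable  %s\n", enable_order[i]);
	}
	while (top > 0)
		printf("disable %s\n", stack[--top]);	/* clk_disable_unprepare() */
	return 0;
}
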
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
new file mode 100644
index 000000000000..86a7825353e3
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_PM_H_
+#define _MTK_VCODEC_DEC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_DEC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c8eaa41c00e6..d7eb8ef855d2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -22,13 +22,13 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
-
+#include "mtk_vcodec_util.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
+#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
#define MTK_PLATFORM_STR "platform:mt8173"
-
#define MTK_VCODEC_MAX_PLANES 3
#define MTK_V4L2_BENCHMARK 0
#define WAIT_INTR_TIMEOUT_MS 1000
@@ -179,6 +179,9 @@ struct mtk_enc_params {
* struct mtk_vcodec_pm - Power management data structure
*/
struct mtk_vcodec_pm {
+ struct clk *vdec_bus_clk_src;
+ struct clk *vencpll;
+
struct clk *vcodecpll;
struct clk *univpll_d2;
struct clk *clk_cci400_sel;
@@ -196,6 +199,32 @@ struct mtk_vcodec_pm {
};
/**
+ * struct vdec_pic_info - picture size information
+ * @pic_w: picture width
+ * @pic_h: picture height
+ * @buf_w: picture buffer width (64 aligned up from pic_w)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
+ * @y_bs_sz: Y bitstream size
+ * @c_bs_sz: CbCr bitstream size
+ * @y_len_sz: additional size required to store decompress information for y
+ * plane
+ * @c_len_sz: additional size required to store decompress information for cbcr
+ * plane
+ * E.g. suppose picture size is 176x144,
+ * buffer size will be aligned to 176x160.
+ */
+struct vdec_pic_info {
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int y_bs_sz;
+ unsigned int c_bs_sz;
+ unsigned int y_len_sz;
+ unsigned int c_len_sz;
+};
+
+/**
* struct mtk_vcodec_ctx - Context (instance) private data.
*
* @type: type of the instance - decoder or encoder
@@ -209,9 +238,12 @@ struct mtk_vcodec_pm {
* @state: state of the context
* @param_change: indicate encode parameter type
* @enc_params: encoding parameters
+ * @dec_if: hooked decoder driver interface
* @enc_if: hoooked encoder driver interface
* @drv_handle: driver handle for specific decode/encode instance
*
+ * @picinfo: store picture info after header parsing
+ * @dpb_size: store dpb count after header parsing
* @int_cond: variable used by the waitqueue
* @int_type: type of the last interrupt
* @queue: waitqueue that can be used to wait for this context to
@@ -219,12 +251,16 @@ struct mtk_vcodec_pm {
* @irq_status: irq status
*
* @ctrl_hdl: handler for v4l2 framework
+ * @decode_work: worker for the decoding
* @encode_work: worker for the encoding
+ * @last_decoded_picinfo: picture information from the latest decode
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @lock: protect variables accessed by V4L2 threads and worker thread such as
+ * mtk_video_dec_buf.
*/
struct mtk_vcodec_ctx {
enum mtk_instance_type type;
@@ -239,28 +275,40 @@ struct mtk_vcodec_ctx {
enum mtk_encode_param param_change;
struct mtk_enc_params enc_params;
+ const struct vdec_common_if *dec_if;
const struct venc_common_if *enc_if;
unsigned long drv_handle;
+ struct vdec_pic_info picinfo;
+ int dpb_size;
+
int int_cond;
int int_type;
wait_queue_head_t queue;
unsigned int irq_status;
struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct decode_work;
struct work_struct encode_work;
+ struct vdec_pic_info last_decoded_picinfo;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
enum v4l2_quantization quantization;
enum v4l2_xfer_func xfer_func;
+
+ int decoded_frame_cnt;
+ struct mutex lock;
+
};
/**
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
* @vfd_enc: Video device for encoder.
*
+ * @m2m_dev_dec: m2m device for decoder
* @m2m_dev_enc: m2m device for encoder.
* @plat_dev: platform device
* @vpu_plat_dev: mtk vpu platform device
@@ -271,7 +319,6 @@ struct mtk_vcodec_ctx {
* @reg_base: Mapped address of MTK Vcodec registers.
*
* @id_counter: used to identify current opened instance
- * @num_instances: counter of active MTK Vcodec instances
*
* @encode_workqueue: encode work queue
*
@@ -280,9 +327,11 @@ struct mtk_vcodec_ctx {
* @dev_mutex: video_device lock
* @queue: waitqueue for waiting for completion of device commands
*
+ * @dec_irq: decoder irq resource
* @enc_irq: h264 encoder irq resource
* @enc_lt_irq: vp8 encoder irq resource
*
+ * @dec_mutex: decoder hardware lock
* @enc_mutex: encoder hardware lock.
*
* @pm: power management control
@@ -291,8 +340,10 @@ struct mtk_vcodec_ctx {
*/
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
struct video_device *vfd_enc;
+ struct v4l2_m2m_dev *m2m_dev_dec;
struct v4l2_m2m_dev *m2m_dev_enc;
struct platform_device *plat_dev;
struct platform_device *vpu_plat_dev;
@@ -302,18 +353,19 @@ struct mtk_vcodec_dev {
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
unsigned long id_counter;
- int num_instances;
+ struct workqueue_struct *decode_workqueue;
struct workqueue_struct *encode_workqueue;
-
int int_cond;
int int_type;
struct mutex dev_mutex;
wait_queue_head_t queue;
+ int dec_irq;
int enc_irq;
int enc_lt_irq;
+ struct mutex dec_mutex;
struct mutex enc_mutex;
struct mtk_vcodec_pm pm;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 5cd2151431bf..aa81f3ce9463 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -188,7 +188,6 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
- dev->num_instances++;
list_add(&ctx->list, &dev->ctx_list);
mutex_unlock(&dev->dev_mutex);
@@ -218,18 +217,13 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
- /*
- * Call v4l2_m2m_ctx_release to make sure the worker thread is not
- * running after venc_if_deinit.
- */
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
- dev->num_instances--;
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
index 52e7e5c9afa0..113b2097f061 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -30,8 +30,7 @@ int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
timeout_jiff = msecs_to_jiffies(timeout_ms);
ret = wait_event_interruptible_timeout(*waitqueue,
- (ctx->int_cond &&
- (ctx->int_type == command)),
+ ctx->int_cond,
timeout_jiff);
if (!ret) {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index 5e3651372a3c..46768c056193 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -81,14 +81,37 @@ void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
return;
}
- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
- mem->va = NULL;
- mem->dma_addr = 0;
- mem->size = 0;
-
mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
(unsigned long)mem->dma_addr);
mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+
+ dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
}
EXPORT_SYMBOL(mtk_vcodec_mem_free);
+
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
+
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+{
+ unsigned long flags;
+ struct mtk_vcodec_ctx *ctx;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ctx;
+}
+EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
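
The two helpers above just publish and read dev->curr_ctx under dev->irqlock so the interrupt path and the V4L2/worker paths always see a consistent pointer. A rough userspace analogue, with a pthread mutex standing in for the spinlock (the names and types below are invented for illustration):

#include <pthread.h>
#include <stdio.h>

struct ctx { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *curr_ctx;	/* shared between "worker" and "irq" paths */

static void set_curr_ctx(struct ctx *c)
{
	pthread_mutex_lock(&lock);
	curr_ctx = c;
	pthread_mutex_unlock(&lock);
}

static struct ctx *get_curr_ctx(void)
{
	struct ctx *c;

	pthread_mutex_lock(&lock);
	c = curr_ctx;
	pthread_mutex_unlock(&lock);
	return c;
}

int main(void)
{
	struct ctx a = { .id = 1 };

	set_curr_ctx(&a);
	printf("current ctx id=%d\n", get_curr_ctx()->id);
	set_curr_ctx(NULL);
	return 0;
}

Build with something like cc -pthread; in the driver the _irqsave spinlock variant suggests the pointer is also touched from interrupt context, which a mutex could not cover.
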
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index d6345fc04840..7d55975d3185 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -26,6 +26,7 @@ struct mtk_vcodec_mem {
};
struct mtk_vcodec_ctx;
+struct mtk_vcodec_dev;
extern int mtk_v4l2_dbg_level;
extern bool mtk_vcodec_dbg;
@@ -84,4 +85,8 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_mem *mem);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+ struct mtk_vcodec_ctx *ctx);
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev);
+
#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
new file mode 100644
index 000000000000..57a842ff3097
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+#define NAL_NON_IDR_SLICE 0x01
+#define NAL_IDR_SLICE 0x05
+#define NAL_H264_PPS 0x08
+#define NAL_TYPE(value) ((value) & 0x1F)
+
+#define BUF_PREDICTION_SZ (32 * 1024)
+
+#define MB_UNIT_LEN 16
+
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct h264_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct h264_ring_fb_list - ring frame buffer list
+ * @fb_list : frame buffer array
+ * @read_idx : read index
+ * @write_idx : write index
+ * @count : buffer count in list
+ */
+struct h264_ring_fb_list {
+ struct h264_fb fb_list[H264_MAX_FB_NUM];
+ unsigned int read_idx;
+ unsigned int write_idx;
+ unsigned int count;
+ unsigned int reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t realloc_mv_buf;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by the VPU and then mapped to the Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by the VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @hdr_buf : Header parsing buffer (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp : display frame buffer ring list (AP-R, VPU-W)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ */
+struct vdec_h264_vsi {
+ unsigned char hdr_buf[HDR_PARSING_BUF_SZ];
+ uint64_t pred_buf_dma;
+ uint64_t mv_buf_dma[H264_MAX_FB_NUM];
+ struct h264_ring_fb_list list_free;
+ struct h264_ring_fb_list list_disp;
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+};
+
+/**
+ * struct vdec_h264_inst - h264 decoder instance
+ * @num_nalu : number of NALUs decoded so far
+ * @ctx : pointer to mtk_vcodec_ctx
+ * @pred_buf : HW working prediction buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi : VPU shared information
+ */
+struct vdec_h264_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_FB_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi *vsi;
+};
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+ return HW_MB_STORE_SZ * (width/MB_UNIT_LEN) * (height/MB_UNIT_LEN);
+}
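
get_mv_buf_size() above reserves HW_MB_STORE_SZ (64) bytes of motion-vector storage for every 16x16 macroblock. A quick standalone arithmetic check with an illustrative 1920x1088 coded size:

#include <stdio.h>

#define HW_MB_STORE_SZ 64
#define MB_UNIT_LEN    16

static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
{
	return HW_MB_STORE_SZ * (width / MB_UNIT_LEN) * (height / MB_UNIT_LEN);
}

int main(void)
{
	/* 1920x1088 coded size: 120 x 68 macroblocks -> 64 * 120 * 68 bytes */
	printf("%u bytes\n", get_mv_buf_size(1920, 1088));	/* prints 522240 */
	return 0;
}
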
+
+static int allocate_predication_buf(struct vdec_h264_inst *inst)
+{
+ int err = 0;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = NULL;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi->pred_buf_dma = 0;
+ mem = &inst->pred_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_inst *inst, struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi->mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem = NULL;
+
+ for (i = 0; i < H264_MAX_FB_NUM; i++) {
+ inst->vsi->mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static int check_list_validity(struct vdec_h264_inst *inst, bool disp_list)
+{
+ struct h264_ring_fb_list *list;
+
+ list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free;
+
+ if (list->count > H264_MAX_FB_NUM ||
+ list->read_idx >= H264_MAX_FB_NUM ||
+ list->write_idx >= H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "%s list err: cnt=%d r_idx=%d w_idx=%d",
+ disp_list ? "disp" : "free", list->count,
+ list->read_idx, list->write_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void put_fb_to_free(struct vdec_h264_inst *inst, struct vdec_fb *fb)
+{
+ struct h264_ring_fb_list *list;
+
+ if (fb) {
+ if (check_list_validity(inst, false))
+ return;
+
+ list = &inst->vsi->list_free;
+ if (list->count == H264_MAX_FB_NUM) {
+ mtk_vcodec_err(inst, "[FB] put fb free_list full");
+ return;
+ }
+
+ mtk_vcodec_debug(inst, "[FB] put fb into free_list @(%p, %llx)",
+ fb->base_y.va, (u64)fb->base_y.dma_addr);
+
+ list->fb_list[list->write_idx].vdec_fb_va = (u64)(uintptr_t)fb;
+ list->write_idx = (list->write_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->write_idx + 1;
+ list->count++;
+ }
+}
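
put_fb_to_free() above appends to a fixed-size ring shared with the VPU: write_idx wraps back to 0 after H264_MAX_FB_NUM - 1 and count tracks occupancy. The standalone ring below uses the same index arithmetic; its payload and the get side are simplified stand-ins, not the driver's structures:

#include <stdio.h>

#define MAX_FB 17	/* mirrors H264_MAX_FB_NUM */

struct ring {
	int slots[MAX_FB];
	unsigned int read_idx, write_idx, count;
};

static int ring_put(struct ring *r, int v)
{
	if (r->count == MAX_FB)
		return -1;			/* full, as in "put fb free_list full" */
	r->slots[r->write_idx] = v;
	r->write_idx = (r->write_idx == MAX_FB - 1) ? 0 : r->write_idx + 1;
	r->count++;
	return 0;
}

static int ring_get(struct ring *r, int *v)
{
	if (r->count == 0)
		return -1;			/* empty */
	*v = r->slots[r->read_idx];
	r->read_idx = (r->read_idx == MAX_FB - 1) ? 0 : r->read_idx + 1;
	r->count--;
	return 0;
}

int main(void)
{
	struct ring r = { { 0 }, 0, 0, 0 };
	int v, i;

	for (i = 0; i < 20; i++)		/* enough iterations to wrap past MAX_FB */
		if (ring_put(&r, i) == 0 && ring_get(&r, &v) == 0)
			printf("%d ", v);
	printf("\n");
	return 0;
}
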
+
+static void get_pic_info(struct vdec_h264_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi->dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_h264_inst *inst = NULL;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_H264;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_h264_vsi *)inst->vpu.vsi;
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_deinit(unsigned long h_vdec)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int find_start_code(unsigned char *data, unsigned int data_sz)
+{
+ if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
+ return 3;
+
+ if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
+ data[3] == 1)
+ return 4;
+
+ return -1;
+}
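
find_start_code() only accepts a 3- or 4-byte Annex-B start code at the very beginning of the buffer, and NAL_TYPE() masks the low five bits of the byte that follows it. A small standalone check against two typical NAL headers (the buffers are made up; 0x68 is a PPS header, type 8, and 0x65 an IDR slice header, type 5):

#include <stdio.h>

#define NAL_TYPE(value) ((value) & 0x1F)

static int find_start_code(const unsigned char *data, unsigned int data_sz)
{
	if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
		return 3;
	if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
	    data[3] == 1)
		return 4;
	return -1;
}

int main(void)
{
	/* 4-byte start code followed by a PPS NAL header (0x68 -> type 8) */
	const unsigned char pps[] = { 0, 0, 0, 1, 0x68 };
	/* 3-byte start code followed by an IDR slice header (0x65 -> type 5) */
	const unsigned char idr[] = { 0, 0, 1, 0x65 };
	int off;

	off = find_start_code(pps, sizeof(pps));
	printf("offset=%d nal_type=%d\n", off, NAL_TYPE(pps[off]));	/* 4, 8 */
	off = find_start_code(idr, sizeof(idr));
	printf("offset=%d nal_type=%d\n", off, NAL_TYPE(idr[off]));	/* 3, 5 */
	return 0;
}
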
+
+static int vdec_h264_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ int nal_start_idx = 0;
+ int err = 0;
+ unsigned int nal_start;
+ unsigned int nal_type;
+ unsigned char *buf;
+ unsigned int buf_sz;
+ unsigned int data[2];
+ uint64_t vdec_fb_va = (u64)(uintptr_t)fb;
+ uint64_t y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ uint64_t c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return vpu_dec_reset(vpu);
+
+ buf = (unsigned char *)bs->va;
+ buf_sz = bs->size;
+ nal_start_idx = find_start_code(buf, buf_sz);
+ if (nal_start_idx < 0)
+ goto err_free_fb_out;
+
+ nal_start = buf[nal_start_idx];
+ nal_type = NAL_TYPE(buf[nal_start_idx]);
+ mtk_vcodec_debug(inst, "\n + NALU[%d] type %d +\n", inst->num_nalu,
+ nal_type);
+
+ if (nal_type == NAL_H264_PPS) {
+ buf_sz -= nal_start_idx;
+ if (buf_sz > HDR_PARSING_BUF_SZ) {
+ err = -EILSEQ;
+ goto err_free_fb_out;
+ }
+ memcpy(inst->vsi->hdr_buf, buf + nal_start_idx, buf_sz);
+ }
+
+ inst->vsi->dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi->dec.y_fb_dma = y_fb_dma;
+ inst->vsi->dec.c_fb_dma = c_fb_dma;
+ inst->vsi->dec.vdec_fb_va = vdec_fb_va;
+
+ data[0] = buf_sz;
+ data[1] = nal_start;
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ *res_chg = inst->vsi->dec.resolution_changed;
+ if (*res_chg) {
+ struct vdec_pic_info pic;
+
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ get_pic_info(inst, &pic);
+
+ if (inst->vsi->dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &pic);
+ if (err)
+ goto err_free_fb_out;
+ }
+ }
+
+ if (nal_type == NAL_NON_IDR_SLICE || nal_type == NAL_IDR_SLICE) {
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+
+ vpu_dec_end(vpu);
+ }
+
+ mtk_vcodec_debug(inst, "\n - NALU[%d] type=%d -\n", inst->num_nalu,
+ nal_type);
+ return 0;
+
+err_free_fb_out:
+ put_fb_to_free(inst, fb);
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
+
+static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
+ struct h264_ring_fb_list *list,
+ bool disp_list, struct vdec_fb **out_fb)
+{
+ struct vdec_fb *fb;
+
+ if (check_list_validity(inst, disp_list))
+ return;
+
+ if (list->count == 0) {
+ mtk_vcodec_debug(inst, "[FB] there is no %s fb",
+ disp_list ? "disp" : "free");
+ *out_fb = NULL;
+ return;
+ }
+
+ fb = (struct vdec_fb *)
+ (uintptr_t)list->fb_list[list->read_idx].vdec_fb_va;
+ fb->status |= (disp_list ? FB_ST_DISPLAY : FB_ST_FREE);
+
+ *out_fb = fb;
+ mtk_vcodec_debug(inst, "[FB] get %s fb st=%d poc=%d %llx",
+ disp_list ? "disp" : "free",
+ fb->status, list->fb_list[list->read_idx].poc,
+ list->fb_list[list->read_idx].vdec_fb_va);
+
+ list->read_idx = (list->read_idx == H264_MAX_FB_NUM - 1) ?
+ 0 : list->read_idx + 1;
+ list->count--;
+}
+
+static int vdec_h264_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_disp, true, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ vdec_h264_get_fb(inst, &inst->vsi->list_free, false, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct vdec_common_if vdec_h264_if = {
+ vdec_h264_init,
+ vdec_h264_decode,
+ vdec_h264_get_param,
+ vdec_h264_deinit,
+};
+
+struct vdec_common_if *get_h264_dec_comm_if(void);
+
+struct vdec_common_if *get_h264_dec_comm_if(void)
+{
+ return &vdec_h264_if;
+}
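
get_h264_dec_comm_if() hands the generic decode layer a struct vdec_common_if full of function pointers, so the caller can drive any codec through the same entry points without knowing its internals. A toy version of that dispatch pattern follows; the interface members and codec functions below are invented for illustration (the real struct lives in vdec_drv_base.h, which is not part of this hunk):

#include <stdio.h>

struct dec_if {
	int  (*init)(void);
	int  (*decode)(int frame);
	void (*deinit)(void);
};

static int h264_init(void)        { printf("h264 init\n"); return 0; }
static int h264_decode(int frame) { printf("h264 decode frame %d\n", frame); return 0; }
static void h264_deinit(void)     { printf("h264 deinit\n"); }

static const struct dec_if h264_if = {
	.init   = h264_init,
	.decode = h264_decode,
	.deinit = h264_deinit,
};

/* Generic layer: only sees the interface, never the codec internals. */
static void run(const struct dec_if *dif)
{
	dif->init();
	dif->decode(0);
	dif->deinit();
}

int main(void)
{
	run(&h264_if);
	return 0;
}
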
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
new file mode 100644
index 000000000000..6e7a62ae0842
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+/* Decoding picture buffer size (3 reference frames plus current frame) */
+#define VP8_DPB_SIZE 4
+
+/* HW working buffer size (bytes) */
+#define VP8_WORKING_BUF_SZ (45 * 4096)
+
+/* HW control register address */
+#define VP8_SEGID_DRAM_ADDR 0x3c
+#define VP8_HW_VLD_ADDR 0x93C
+#define VP8_HW_VLD_VALUE 0x940
+#define VP8_BSASET 0x100
+#define VP8_BSDSET 0x104
+#define VP8_RW_CKEN_SET 0x0
+#define VP8_RW_DCM_CON 0x18
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_MISC_SYS_SEL 0x84
+#define VP8_RW_MISC_SPEC_CON 0xC8
+#define VP8_WO_VLD_SRST 0x108
+#define VP8_RW_VP8_CTRL 0xA4
+#define VP8_RW_MISC_DCM_CON 0xEC
+#define VP8_RW_MISC_SRST 0xF4
+#define VP8_RW_MISC_FUNC_CON 0xCC
+
+#define VP8_MAX_FRM_BUF_NUM 5
+#define VP8_MAX_FRM_BUF_NODE_NUM (VP8_MAX_FRM_BUF_NUM * 2)
+
+/* required buffer size (bytes) to store decode information */
+#define VP8_HW_SEGMENT_DATA_SZ 272
+#define VP8_HW_SEGMENT_UINT 4
+
+#define VP8_DEC_TABLE_PROC_LOOP 96
+#define VP8_DEC_TABLE_UNIT 3
+#define VP8_DEC_TABLE_SZ 300
+#define VP8_DEC_TABLE_OFFSET 2
+#define VP8_DEC_TABLE_RW_UNIT 4
+
+/**
+ * struct vdec_vp8_dec_info - decode misc information
+ * @working_buf_dma : working buffer dma address
+ * @prev_y_dma : previous decoded frame buffer Y plane address
+ * @cur_y_fb_dma : current Y plane frame buffer dma address
+ * @cur_c_fb_dma : current C plane frame buffer dma address
+ * @bs_dma : bitstream dma address
+ * @bs_sz : bitstream size
+ * @resolution_changed: resolution change flag, 1 - changed, 0 - not changed
+ * @show_frame : display this frame or not
+ * @wait_key_frame : waiting for a key frame
+ */
+struct vdec_vp8_dec_info {
+ uint64_t working_buf_dma;
+ uint64_t prev_y_dma;
+ uint64_t cur_y_fb_dma;
+ uint64_t cur_c_fb_dma;
+ uint64_t bs_dma;
+ uint32_t bs_sz;
+ uint32_t resolution_changed;
+ uint32_t show_frame;
+ uint32_t wait_key_frame;
+};
+
+/**
+ * struct vdec_vp8_vsi - VPU shared information
+ * @dec : decoding information
+ * @pic : picture information
+ * @dec_table : decoder coefficient table
+ * @segment_buf : segmentation buffer
+ * @load_data : flag to indicate reload decode data
+ */
+struct vdec_vp8_vsi {
+ struct vdec_vp8_dec_info dec;
+ struct vdec_pic_info pic;
+ uint32_t dec_table[VP8_DEC_TABLE_SZ];
+ uint32_t segment_buf[VP8_HW_SEGMENT_DATA_SZ][VP8_HW_SEGMENT_UINT];
+ uint32_t load_data;
+};
+
+/**
+ * struct vdec_vp8_hw_reg_base - HW register base
+ * @sys : base address for sys
+ * @misc : base address for misc
+ * @ld : base address for ld
+ * @top : base address for top
+ * @cm : base address for cm
+ * @hwd : base address for hwd
+ * @hwb : base address for hwb
+ */
+struct vdec_vp8_hw_reg_base {
+ void __iomem *sys;
+ void __iomem *misc;
+ void __iomem *ld;
+ void __iomem *top;
+ void __iomem *cm;
+ void __iomem *hwd;
+ void __iomem *hwb;
+};
+
+/**
+ * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
+ * @wq_hd : Wait queue to wait VPU message ack
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @failure : VPU execution result status 0 - success, others - fail
+ * @inst_addr : VPU decoder instance address
+ */
+struct vdec_vp8_vpu_inst {
+ wait_queue_head_t wq_hd;
+ int signaled;
+ int failure;
+ uint32_t inst_addr;
+};
+
+/* frame buffer (fb) list
+ * [available_fb_node_list] - pool of fb nodes; a node is taken from here,
+ *                            assigned a decoded fb, and moved to one of the
+ *                            lists below
+ * [fb_use_list]  - fb is set after decode and is moved to this list
+ * [fb_free_list] - fb not needed for reference any more is moved from
+ *                  [fb_use_list] to [fb_free_list]; once the user removes
+ *                  the fb from [fb_free_list], the node is circulated back
+ *                  to [available_fb_node_list]
+ * [fb_disp_list] - fb is set after decode and is moved to this list; once
+ *                  the user removes the fb from [fb_disp_list], the node is
+ *                  circulated back to [available_fb_node_list]
+ */
+
+/**
+ * struct vdec_vp8_inst - VP8 decoder instance
+ * @cur_fb : current frame buffer
+ * @dec_fb : decode frame buffer node
+ * @available_fb_node_list : list to store available frame buffer node
+ * @fb_use_list : list to store frame buffer in use
+ * @fb_free_list : list to store free frame buffer
+ * @fb_disp_list : list to store display ready frame buffer
+ * @working_buf : HW decoder working buffer
+ * @reg_base : HW register base address
+ * @frm_cnt : decode frame count
+ * @ctx : V4L2 context
+ * @dev : platform device
+ * @vpu : VPU instance for decoder
+ * @vsi : VPU share information
+ */
+struct vdec_vp8_inst {
+ struct vdec_fb *cur_fb;
+ struct vdec_fb_node dec_fb[VP8_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct mtk_vcodec_mem working_buf;
+ struct vdec_vp8_hw_reg_base reg_base;
+ unsigned int frm_cnt;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp8_vsi *vsi;
+};
+
+static void get_hw_reg_base(struct vdec_vp8_inst *inst)
+{
+ inst->reg_base.top = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_TOP);
+ inst->reg_base.cm = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_CM);
+ inst->reg_base.hwd = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWD);
+ inst->reg_base.sys = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_SYS);
+ inst->reg_base.misc = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_MISC);
+ inst->reg_base.ld = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_LD);
+ inst->reg_base.hwb = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWB);
+}
+
+static void write_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = (1 << 16) + ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = vsi->segment_buf[i][j];
+ writel(val, cm + VP8_HW_VLD_VALUE);
+ }
+ }
+}
+
+static void read_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 seg_id_addr;
+ u32 val;
+ void __iomem *cm = inst->reg_base.cm;
+ struct vdec_vp8_vsi *vsi = inst->vsi;
+
+ seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+ for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+ val = ((seg_id_addr + i) << 2) + j;
+ writel(val, cm + VP8_HW_VLD_ADDR);
+
+ val = readl(cm + VP8_HW_VLD_VALUE);
+ vsi->segment_buf[i][j] = val;
+ }
+ }
+}
+
+/* reset HW and enable HW read/write data function */
+static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
+{
+ u32 val = 0;
+ void __iomem *sys = inst->reg_base.sys;
+ void __iomem *misc = inst->reg_base.misc;
+ void __iomem *ld = inst->reg_base.ld;
+ void __iomem *hwb = inst->reg_base.hwb;
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ writel(0x1, sys + VP8_RW_CKEN_SET);
+ writel(0x101, ld + VP8_WO_VLD_SRST);
+ writel(0x101, hwb + VP8_WO_VLD_SRST);
+
+ writel(1, sys);
+ val = readl(misc + VP8_RW_MISC_SRST);
+ writel((val & 0xFFFFFFFE), misc + VP8_RW_MISC_SRST);
+
+ writel(0x1, misc + VP8_RW_MISC_SYS_SEL);
+ writel(0x17F, misc + VP8_RW_MISC_SPEC_CON);
+ writel(0x71201100, misc + VP8_RW_MISC_FUNC_CON);
+ writel(0x0, ld + VP8_WO_VLD_SRST);
+ writel(0x0, hwb + VP8_WO_VLD_SRST);
+ writel(0x1, sys + VP8_RW_DCM_CON);
+ writel(0x1, misc + VP8_RW_MISC_DCM_CON);
+ writel(0x1, hwd + VP8_RW_VP8_CTRL);
+}
+
+static void store_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i, j;
+ u32 addr = 0, val = 0;
+ void __iomem *hwd = inst->reg_base.hwd;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ for (j = 0; j < VP8_DEC_TABLE_UNIT ; j++) {
+ val = *p++;
+ writel(val, hwd + VP8_BSDSET);
+ }
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
+
+static void load_dec_table(struct vdec_vp8_inst *inst)
+{
+ int i;
+ u32 addr = 0;
+ u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+ void __iomem *hwd = inst->reg_base.hwd;
+
+ for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+ writel(addr, hwd + VP8_BSASET);
+ /* read total 11 bytes */
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET);
+ *p++ = readl(hwd + VP8_BSDSET) & 0xFFFFFF;
+ addr += VP8_DEC_TABLE_RW_UNIT;
+ }
+}
+
+static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void vp8_dec_finish(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node;
+ uint64_t prev_y_dma = inst->vsi->dec.prev_y_dma;
+
+ mtk_vcodec_debug(inst, "prev fb base dma=%llx", prev_y_dma);
+
+ /* move the last successfully decoded frame to fb_free_list */
+ if (prev_y_dma != 0) {
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ struct vdec_fb *fb = (struct vdec_fb *)node->fb;
+
+ if (prev_y_dma == (uint64_t)fb->base_y.dma_addr) {
+ list_move_tail(&node->list,
+ &inst->fb_free_list);
+ break;
+ }
+ }
+ }
+
+ /* available_fb_node_list -> fb_use_list */
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+
+ /* available_fb_node_list -> fb_disp_list */
+ if (inst->vsi->dec.show_frame) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = inst->cur_fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ }
+}
+
+static void move_fb_list_use_to_free(struct vdec_vp8_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+}
+
+static void init_list(struct vdec_vp8_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void add_fb_to_free_list(struct vdec_vp8_inst *inst, void *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+}
+
+static int alloc_working_buf(struct vdec_vp8_inst *inst)
+{
+ int err;
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ mem->size = VP8_WORKING_BUF_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "Cannot allocate working buffer");
+ return err;
+ }
+
+ inst->vsi->dec.working_buf_dma = (uint64_t)mem->dma_addr;
+ return 0;
+}
+
+static void free_working_buf(struct vdec_vp8_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ inst->vsi->dec.working_buf_dma = 0;
+}
+
+static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp8_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP8;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ inst->vsi = (struct vdec_vp8_vsi *)inst->vpu.vsi;
+ init_list(inst);
+ err = alloc_working_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ get_hw_reg_base(inst);
+ mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
+
+ *h_vdec = (unsigned long)inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static int vdec_vp8_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+ struct vdec_vp8_dec_info *dec = &inst->vsi->dec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ unsigned char *bs_va;
+ unsigned int data;
+ int err = 0;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL) {
+ move_fb_list_use_to_free(inst);
+ return vpu_dec_reset(vpu);
+ }
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
+ inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
+
+ inst->cur_fb = fb;
+ dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_sz = bs->size;
+ dec->cur_y_fb_dma = y_fb_dma;
+ dec->cur_c_fb_dma = c_fb_dma;
+
+ mtk_vcodec_debug(inst, "\n + FRAME[%d] +\n", inst->frm_cnt);
+
+ write_hw_segmentation_data(inst);
+ enable_hw_rw_function(inst);
+ store_dec_table(inst);
+
+ bs_va = (unsigned char *)bs->va;
+
+ /* retrieve width/height and scale info from header */
+ data = (*(bs_va + 9) << 24) | (*(bs_va + 8) << 16) |
+ (*(bs_va + 7) << 8) | *(bs_va + 6);
+ err = vpu_dec_start(vpu, &data, 1);
+ if (err) {
+ add_fb_to_free_list(inst, fb);
+ if (dec->wait_key_frame) {
+ mtk_vcodec_debug(inst, "wait key frame !");
+ return 0;
+ }
+
+ goto error;
+ }
+
+ if (dec->resolution_changed) {
+ mtk_vcodec_debug(inst, "- resolution_changed -");
+ *res_chg = true;
+ add_fb_to_free_list(inst, fb);
+ return 0;
+ }
+
+ /* wait decoder done interrupt */
+ mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (inst->vsi->load_data)
+ load_dec_table(inst);
+
+ vp8_dec_finish(inst);
+ read_hw_segmentation_data(inst);
+
+ err = vpu_dec_end(vpu);
+ if (err)
+ goto error;
+
+ mtk_vcodec_debug(inst, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt,
+ dec->show_frame);
+ inst->frm_cnt++;
+ *res_chg = false;
+ return 0;
+
+error:
+ mtk_vcodec_err(inst, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
+ return err;
+}
+
+static void get_disp_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_free_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ fb = NULL;
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic.pic_w;
+ cr->height = inst->vsi->pic.pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp8_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = VP8_DPB_SIZE;
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vdec_vp8_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_working_buf(inst);
+ kfree(inst);
+}
+
+static struct vdec_common_if vdec_vp8_if = {
+ vdec_vp8_init,
+ vdec_vp8_decode,
+ vdec_vp8_get_param,
+ vdec_vp8_deinit,
+};
+
+struct vdec_common_if *get_vp8_dec_comm_if(void);
+
+struct vdec_common_if *get_vp8_dec_comm_if(void)
+{
+ return &vdec_vp8_if;
+}
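
vdec_vp8_decode() packs bytes 6..9 of the bitstream into one 32-bit word for the VPU; per the VP8 key-frame header layout in RFC 6386 (an assumption taken from the spec, not something this patch states), those bytes carry 14-bit width and height values plus 2-bit scale fields. The standalone snippet below builds the same word from made-up bytes and unpacks it under that assumption:

#include <stdio.h>

int main(void)
{
	/* Bytes 6..9 of a hypothetical VP8 key frame: width=1280, height=720 */
	unsigned char bs[10] = { 0 };
	unsigned int data, width, height, hscale, vscale;

	bs[6] = 1280 & 0xff;  bs[7] = (1280 >> 8) & 0x3f;	/* 14-bit width  */
	bs[8] = 720 & 0xff;   bs[9] = (720 >> 8) & 0x3f;	/* 14-bit height */

	/* Same packing as the driver: b9<<24 | b8<<16 | b7<<8 | b6 */
	data = (bs[9] << 24) | (bs[8] << 16) | (bs[7] << 8) | bs[6];

	width  = data & 0x3fff;
	hscale = (data >> 14) & 0x3;
	height = (data >> 16) & 0x3fff;
	vscale = (data >> 30) & 0x3;

	printf("width=%u hscale=%u height=%u vscale=%u\n",
	       width, hscale, height, vscale);	/* 1280 0 720 0 */
	return 0;
}
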
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
new file mode 100644
index 000000000000..e91a3b425b0c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Kai-Sean Yang <kai-sean.yang@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_vpu_if.h"
+
+#define VP9_SUPER_FRAME_BS_SZ 64
+#define MAX_VP9_DPB_SIZE 9
+
+#define REFS_PER_FRAME 3
+#define MAX_NUM_REF_FRAMES 8
+#define VP9_MAX_FRM_BUF_NUM 9
+#define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2)
+
+/**
+ * struct vp9_dram_buf - contains buffer info for vpu
+ * @va : cpu address
+ * @pa : iova address
+ * @sz : buffer size
+ * @padding : for 64 bytes alignment
+ */
+struct vp9_dram_buf {
+ unsigned long va;
+ unsigned long pa;
+ unsigned int sz;
+ unsigned int padding;
+};
+
+/**
+ * struct vp9_fb_info - contains frame buffer info
+ * @fb : frame buffer
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_fb_info {
+ struct vdec_fb *fb;
+ unsigned int reserved[32];
+};
+
+/**
+ * struct vp9_ref_cnt_buf - contains reference buffer information
+ * @buf : referenced frame buffer
+ * @ref_cnt : referenced frame buffer's reference count.
+ * When reference count=0, remove it from reference list
+ */
+struct vp9_ref_cnt_buf {
+ struct vp9_fb_info buf;
+ unsigned int ref_cnt;
+};
+
+/**
+ * struct vp9_ref_buf - contains current frame's reference buffer information
+ * @buf : reference buffer
+ * @idx : reference buffer index to frm_bufs
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_ref_buf {
+ struct vp9_fb_info *buf;
+ unsigned int idx;
+ unsigned int reserved[6];
+};
+
+/**
+ * struct vp9_sf_ref_fb - contains super frame reference frame buffer info
+ * @fb : super frame reference frame buffer
+ * @used : this reference frame info entry is used
+ * @padding : for 64 bytes size align
+ */
+struct vp9_sf_ref_fb {
+ struct vdec_fb fb;
+ int used;
+ int padding;
+};
+
+/*
+ * struct vdec_vp9_vsi - shared buffer between host and VPU firmware
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @sf_bs_buf : super frame backup buffer (AP-W, VPU-R)
+ * @sf_ref_fb : record super frame reference buffer information
+ * (AP-R/W, VPU-R/W)
+ * @sf_next_ref_fb_idx : next available super frame (AP-W, VPU-R)
+ * @sf_frm_cnt : super frame count, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_offset : super frame offset, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_sz : super frame size, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_idx : current super frame (AP-R, VPU-W)
+ * @sf_init : inform super frame info already parsed by vpu (AP-R, VPU-W)
+ * @fb : capture buffer (AP-W, VPU-R)
+ * @bs : bs buffer (AP-W, VPU-R)
+ * @cur_fb : current show capture buffer (AP-R/W, VPU-R/W)
+ * @pic_w : picture width (AP-R, VPU-W)
+ * @pic_h : picture height (AP-R, VPU-W)
+ * @buf_w : coded width (AP-R, VPU-W)
+ * @buf_h : coded height (AP-R, VPU-W)
+ * @buf_sz_y_bs : ufo compressed y plane size (AP-R, VPU-W)
+ * @buf_sz_c_bs : ufo compressed cbcr plane size (AP-R, VPU-W)
+ * @buf_len_sz_y : size used to store y plane ufo info (AP-R, VPU-W)
+ * @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W)
+ *
+ * @profile : profile parsed from vpu (AP-R, VPU-W)
+ * @show_frame : display this frame or not (AP-R, VPU-W)
+ * @show_existing_frame : inform this frame is show existing frame
+ * (AP-R, VPU-W)
+ * @frm_to_show_idx : index to show frame (AP-R, VPU-W)
+ *
+ * @refresh_frm_flags : indicate which reference buffers need their reference
+ * count refreshed for this frame (AP-R, VPU-W)
+ * @resolution_changed : resolution change in this frame (AP-R, VPU-W)
+ *
+ * @frm_bufs : maintain reference buffer info (AP-R/W, VPU-R/W)
+ * @ref_frm_map : maintain reference buffer map info (AP-R/W, VPU-R/W)
+ * @new_fb_idx : index to frm_bufs array (AP-R, VPU-W)
+ * @frm_num : decoded frame number, include sub-frame count (AP-R, VPU-W)
+ * @mv_buf : motion vector working buffer (AP-W, VPU-R)
+ * @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W)
+ */
+struct vdec_vp9_vsi {
+ unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ int sf_next_ref_fb_idx;
+ unsigned int sf_frm_cnt;
+ unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_idx;
+ unsigned int sf_init;
+ struct vdec_fb fb;
+ struct mtk_vcodec_mem bs;
+ struct vdec_fb cur_fb;
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int buf_sz_y_bs;
+ unsigned int buf_sz_c_bs;
+ unsigned int buf_len_sz_y;
+ unsigned int buf_len_sz_c;
+ unsigned int profile;
+ unsigned int show_frame;
+ unsigned int show_existing_frame;
+ unsigned int frm_to_show_idx;
+ unsigned int refresh_frm_flags;
+ unsigned int resolution_changed;
+
+ struct vp9_ref_cnt_buf frm_bufs[VP9_MAX_FRM_BUF_NUM];
+ int ref_frm_map[MAX_NUM_REF_FRAMES];
+ unsigned int new_fb_idx;
+ unsigned int frm_num;
+ struct vp9_dram_buf mv_buf;
+
+ struct vp9_ref_buf frm_refs[REFS_PER_FRAME];
+};
+
+/*
+ * struct vdec_vp9_inst - vp9 decode instance
+ * @mv_buf : working buffer for mv
+ * @dec_fb : vdec_fb node to link fb to different fb_xxx_list
+ * @available_fb_node_list : current available vdec_fb node
+ * @fb_use_list : current used or referenced vdec_fb
+ * @fb_free_list : current available to free vdec_fb
+ * @fb_disp_list : current available to display vdec_fb
+ * @cur_fb : current frame buffer
+ * @ctx : current decode context
+ * @vpu : vpu instance information
+ * @vsi : shared buffer between host and VPU firmware
+ * @total_frm_cnt : total frame count; it does not include sub-frames in a
+ * super frame
+ * @mem : instance memory information
+ */
+struct vdec_vp9_inst {
+ struct mtk_vcodec_mem mv_buf;
+
+ struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM];
+ struct list_head available_fb_node_list;
+ struct list_head fb_use_list;
+ struct list_head fb_free_list;
+ struct list_head fb_disp_list;
+ struct vdec_fb *cur_fb;
+ struct mtk_vcodec_ctx *ctx;
+ struct vdec_vpu_inst vpu;
+ struct vdec_vp9_vsi *vsi;
+ unsigned int total_frm_cnt;
+ struct mtk_vcodec_mem mem;
+};
+
+static bool vp9_is_sf_ref_fb(struct vdec_vp9_inst *inst, struct vdec_fb *fb)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (fb == &vsi->sf_ref_fb[i].fb)
+ return true;
+ }
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+ *inst, void *addr)
+{
+ struct vdec_fb *fb = NULL;
+ struct vdec_fb_node *node;
+
+ list_for_each_entry(node, &inst->fb_use_list, list) {
+ fb = (struct vdec_fb *)node->fb;
+ if (fb->base_y.va == addr) {
+ list_move_tail(&node->list,
+ &inst->available_fb_node_list);
+ break;
+ }
+ }
+ return fb;
+}
+
+static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (fb) {
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_free_list);
+ }
+ } else {
+ mtk_vcodec_debug(inst, "No free fb node");
+ }
+}
+
+static void vp9_free_sf_ref_fb(struct vdec_fb *fb)
+{
+ struct vp9_sf_ref_fb *sf_ref_fb =
+ container_of(fb, struct vp9_sf_ref_fb, fb);
+
+ sf_ref_fb->used = 0;
+}
+
+static void vp9_ref_cnt_fb(struct vdec_vp9_inst *inst, int *idx,
+ int new_idx)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int ref_idx = *idx;
+
+ if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) {
+ vsi->frm_bufs[ref_idx].ref_cnt--;
+
+ if (vsi->frm_bufs[ref_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(inst,
+ vsi->frm_bufs[ref_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[ref_idx].buf.fb->base_y.va);
+ vp9_add_to_fb_free_list(inst, fb);
+ } else
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[ref_idx].buf.fb);
+ }
+ }
+
+ *idx = new_idx;
+ vsi->frm_bufs[new_idx].ref_cnt++;
+}
+
+static void vp9_free_all_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int i;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+ if (vsi->sf_ref_fb[i].fb.base_y.va) {
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_y);
+ mtk_vcodec_mem_free(inst->ctx,
+ &vsi->sf_ref_fb[i].fb.base_c);
+ vsi->sf_ref_fb[i].used = 0;
+ }
+ }
+}
+
+/* For each sub-frame except the last one, the driver dynamically
+ * allocates a reference buffer by calling vp9_get_sf_ref_fb().
+ * The last sub-frame uses the original fb provided by the
+ * vp9_dec_decode() interface.
+ */
+static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+ int idx;
+ struct mtk_vcodec_mem *mem_basy_y;
+ struct mtk_vcodec_mem *mem_basy_c;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ for (idx = 0;
+ idx < ARRAY_SIZE(vsi->sf_ref_fb);
+ idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va &&
+ vsi->sf_ref_fb[idx].used == 0) {
+ return idx;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(vsi->sf_ref_fb); idx++) {
+ if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(vsi->sf_ref_fb)) {
+ mtk_vcodec_err(inst, "List Full");
+ return -1;
+ }
+
+ mem_basy_y = &vsi->sf_ref_fb[idx].fb.base_y;
+ mem_basy_y->size = vsi->buf_sz_y_bs +
+ vsi->buf_len_sz_y;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_y)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_buf y_buf");
+ return -1;
+ }
+
+ mem_basy_c = &vsi->sf_ref_fb[idx].fb.base_c;
+ mem_basy_c->size = vsi->buf_sz_c_bs +
+ vsi->buf_len_sz_c;
+
+ if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_c)) {
+ mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb c_buf");
+ return -1;
+ }
+ vsi->sf_ref_fb[idx].used = 0;
+
+ return idx;
+}
+
+static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ int result;
+ struct mtk_vcodec_mem *mem;
+
+ unsigned int max_pic_w;
+ unsigned int max_pic_h;
+
+
+ if (!(inst->ctx->dev->dec_capability &
+ VCODEC_CAPABILITY_4K_DISABLED)) {
+ max_pic_w = VCODEC_DEC_4K_CODED_WIDTH;
+ max_pic_h = VCODEC_DEC_4K_CODED_HEIGHT;
+ } else {
+ max_pic_w = MTK_VDEC_MAX_W;
+ max_pic_h = MTK_VDEC_MAX_H;
+ }
+
+ if ((vsi->pic_w > max_pic_w) ||
+ (vsi->pic_h > max_pic_h)) {
+ mtk_vcodec_err(inst, "Invalid w/h %d/%d",
+ vsi->pic_w, vsi->pic_h);
+ return false;
+ }
+
+ mtk_vcodec_debug(inst, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
+ vsi->resolution_changed,
+ vsi->pic_w,
+ vsi->pic_h,
+ vsi->buf_w,
+ vsi->buf_h);
+
+ mem = &inst->mv_buf;
+
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ mem->size = ((vsi->buf_w / 64) *
+ (vsi->buf_h / 64) + 2) * 36 * 16;
+
+ result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (result) {
+ mem->size = 0;
+ mtk_vcodec_err(inst, "Cannot allocate mv_buf");
+ return false;
+ }
+ /* Set the va again */
+ vsi->mv_buf.va = (unsigned long)mem->va;
+ vsi->mv_buf.pa = (unsigned long)mem->dma_addr;
+ vsi->mv_buf.sz = (unsigned int)mem->size;
+
+ vp9_free_all_sf_ref_fb(inst);
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ return true;
+}
+
+static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_err(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_disp_list);
+ } else {
+ mtk_vcodec_err(inst, "No available fb node");
+ return false;
+ }
+
+ return true;
+}
+
+/* If any buffer updating is signaled, it should be done here. */
+static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ struct vp9_fb_info *frm_to_show;
+ int ref_index = 0, mask;
+
+ for (mask = vsi->refresh_frm_flags; mask; mask >>= 1) {
+ if (mask & 1)
+ vp9_ref_cnt_fb(inst, &vsi->ref_frm_map[ref_index],
+ vsi->new_fb_idx);
+ ++ref_index;
+ }
+
+ frm_to_show = &vsi->frm_bufs[vsi->new_fb_idx].buf;
+ vsi->frm_bufs[vsi->new_fb_idx].ref_cnt--;
+
+ if (frm_to_show->fb != inst->cur_fb) {
+ /* This is a show-existing-frame case with no decode output;
+ * copy the frame data from frm_to_show to the current CAPTURE
+ * buffer
+ */
+ if ((frm_to_show->fb != NULL) &&
+ (inst->cur_fb->base_y.size >=
+ frm_to_show->fb->base_y.size)) {
+ memcpy((void *)inst->cur_fb->base_y.va,
+ (void *)frm_to_show->fb->base_y.va,
+ vsi->buf_w *
+ vsi->buf_h);
+ memcpy((void *)inst->cur_fb->base_c.va,
+ (void *)frm_to_show->fb->base_c.va,
+ vsi->buf_w *
+ vsi->buf_h / 2);
+ } else {
+ /* After a resolution change, the current CAPTURE buffer
+ * may be smaller than the frm_to_show buffer
+ */
+ if (frm_to_show->fb != NULL)
+ mtk_vcodec_err(inst,
+ "inst->cur_fb->base_y.size=%zu, frm_to_show->fb.base_y.size=%zu",
+ inst->cur_fb->base_y.size,
+ frm_to_show->fb->base_y.size);
+ }
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, inst->cur_fb);
+ }
+ } else {
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+ if (vsi->show_frame)
+ vp9_add_to_fb_disp_list(inst, frm_to_show->fb);
+ }
+ }
+
+ /* When ref_cnt == 0, move this fb to fb_free_list. The v4l2 driver
+ * will clean up fb_free_list.
+ */
+ if (vsi->frm_bufs[vsi->new_fb_idx].ref_cnt == 0) {
+ if (!vp9_is_sf_ref_fb(
+ inst, vsi->frm_bufs[vsi->new_fb_idx].buf.fb)) {
+ struct vdec_fb *fb;
+
+ fb = vp9_rm_from_fb_use_list(inst,
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb->base_y.va);
+
+ vp9_add_to_fb_free_list(inst, fb);
+ } else {
+ vp9_free_sf_ref_fb(
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb);
+ }
+ }
+
+ /* If this is a super frame and it is not the last sub-frame, get the
+ * next fb for sub-frame decoding.
+ */
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+}
+
+static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+
+ if (ctx->irq_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+ return true;
+ else
+ return false;
+}
+
+static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_ctx *ctx)
+{
+ int result;
+ struct mtk_vcodec_mem mem;
+ struct vdec_vp9_inst *inst;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.size = sizeof(struct vdec_vp9_inst);
+ result = mtk_vcodec_mem_alloc(ctx, &mem);
+ if (result)
+ return NULL;
+
+ inst = mem.va;
+ inst->mem = mem;
+
+ return inst;
+}
+
+static void vp9_free_inst(struct vdec_vp9_inst *inst)
+{
+ struct mtk_vcodec_mem mem;
+
+ mem = inst->mem;
+ if (mem.va)
+ mtk_vcodec_mem_free(inst->ctx, &mem);
+}
+
+static bool vp9_decode_end_proc(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ bool ret = false;
+
+ if (!vsi->show_existing_frame) {
+ ret = vp9_wait_dec_end(inst);
+ if (!ret) {
+ mtk_vcodec_err(inst, "Decode failed, Decode Timeout @[%d]",
+ vsi->frm_num);
+ return false;
+ }
+
+ if (vpu_dec_end(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_end failed");
+ return false;
+ }
+ mtk_vcodec_debug(inst, "Decode Ok @%d (%d/%d)", vsi->frm_num,
+ vsi->pic_w, vsi->pic_h);
+ } else {
+ mtk_vcodec_debug(inst, "Decode Ok @%d (show_existing_frame)",
+ vsi->frm_num);
+ }
+
+ vp9_swap_frm_bufs(inst);
+ vsi->frm_num++;
+ return true;
+}
+
+static bool vp9_is_last_sub_frm(struct vdec_vp9_inst *inst)
+{
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+
+ if (vsi->sf_frm_cnt <= 0 || vsi->sf_frm_idx == vsi->sf_frm_cnt)
+ return true;
+
+ return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_disp_list(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_disp_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_DISPLAY;
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+ node->fb, fb->status);
+ } else
+ mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+
+ return fb;
+}
+
+static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
+ struct vdec_fb *fb)
+{
+ struct vdec_fb_node *node;
+
+ if (!fb) {
+ mtk_vcodec_debug(inst, "fb == NULL");
+ return false;
+ }
+
+ node = list_first_entry_or_null(&inst->available_fb_node_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ node->fb = fb;
+ list_move_tail(&node->list, &inst->fb_use_list);
+ } else {
+ mtk_vcodec_err(inst, "No free fb node");
+ return false;
+ }
+ return true;
+}
+
+static void vp9_reset(struct vdec_vp9_inst *inst)
+{
+ struct vdec_fb_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+ list_move_tail(&node->list, &inst->fb_free_list);
+
+ vp9_free_all_sf_ref_fb(inst);
+ inst->vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+ if (vpu_dec_reset(&inst->vpu))
+ mtk_vcodec_err(inst, "vp9_dec_vpu_reset failed");
+
+ /* Set the va again, since vpu_dec_reset will clear mv_buf in vpu */
+ inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
+ inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr;
+ inst->vsi->mv_buf.sz = (unsigned long)inst->mv_buf.size;
+}
+
+static void init_all_fb_lists(struct vdec_vp9_inst *inst)
+{
+ int i;
+
+ INIT_LIST_HEAD(&inst->available_fb_node_list);
+ INIT_LIST_HEAD(&inst->fb_use_list);
+ INIT_LIST_HEAD(&inst->fb_free_list);
+ INIT_LIST_HEAD(&inst->fb_disp_list);
+
+ for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+ INIT_LIST_HEAD(&inst->dec_fb[i].list);
+ inst->dec_fb[i].fb = NULL;
+ list_add_tail(&inst->dec_fb[i].list,
+ &inst->available_fb_node_list);
+ }
+}
+
+static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
+{
+ pic->y_bs_sz = inst->vsi->buf_sz_y_bs;
+ pic->c_bs_sz = inst->vsi->buf_sz_c_bs;
+ pic->y_len_sz = inst->vsi->buf_len_sz_y;
+ pic->c_len_sz = inst->vsi->buf_len_sz_c;
+
+ pic->pic_w = inst->vsi->pic_w;
+ pic->pic_h = inst->vsi->pic_h;
+ pic->buf_w = inst->vsi->buf_w;
+ pic->buf_h = inst->vsi->buf_h;
+
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+
+ *out_fb = vp9_rm_from_fb_disp_list(inst);
+ if (*out_fb)
+ (*out_fb)->status |= FB_ST_DISPLAY;
+}
+
+static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+ struct vdec_fb_node *node;
+ struct vdec_fb *fb = NULL;
+
+ node = list_first_entry_or_null(&inst->fb_free_list,
+ struct vdec_fb_node, list);
+ if (node) {
+ list_move_tail(&node->list, &inst->available_fb_node_list);
+ fb = (struct vdec_fb *)node->fb;
+ fb->status |= FB_ST_FREE;
+ mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+ node->fb, fb->status);
+ } else {
+ mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ }
+
+ *out_fb = fb;
+}
+
+static void vdec_vp9_deinit(unsigned long h_vdec)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct mtk_vcodec_mem *mem;
+ int ret = 0;
+
+ ret = vpu_dec_deinit(&inst->vpu);
+ if (ret)
+ mtk_vcodec_err(inst, "vpu_dec_deinit failed");
+
+ mem = &inst->mv_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ vp9_free_all_sf_ref_fb(inst);
+ vp9_free_inst(inst);
+}
+
+static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_vp9_inst *inst;
+
+ inst = vp9_alloc_inst(ctx);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->total_frm_cnt = 0;
+ inst->ctx = ctx;
+
+ inst->vpu.id = IPI_VDEC_VP9;
+ inst->vpu.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu.ctx = ctx;
+ inst->vpu.handler = vpu_dec_ipi_handler;
+
+ if (vpu_dec_init(&inst->vpu)) {
+ mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
+ goto err_deinit_inst;
+ }
+
+ inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
+ init_all_fb_lists(inst);
+
+ (*h_vdec) = (unsigned long)inst;
+ return 0;
+
+err_deinit_inst:
+ vp9_free_inst(inst);
+
+ return -EINVAL;
+}
+
+static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ struct vdec_vp9_vsi *vsi = inst->vsi;
+ u32 data[3];
+ int i;
+
+ *res_chg = false;
+
+ if ((bs == NULL) && (fb == NULL)) {
+ mtk_vcodec_debug(inst, "[EOS]");
+ vp9_reset(inst);
+ return ret;
+ }
+
+ if (bs == NULL) {
+ mtk_vcodec_err(inst, "bs == NULL");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(inst, "Input BS Size = %zu", bs->size);
+
+ while (1) {
+ struct vdec_fb *cur_fb = NULL;
+
+ data[0] = *((unsigned int *)bs->va);
+ data[1] = *((unsigned int *)(bs->va + 4));
+ data[2] = *((unsigned int *)(bs->va + 8));
+
+ vsi->bs = *bs;
+
+ if (fb)
+ vsi->fb = *fb;
+
+ if (!vsi->sf_init) {
+ unsigned int sf_bs_sz;
+ unsigned int sf_bs_off;
+ unsigned char *sf_bs_src;
+ unsigned char *sf_bs_dst;
+
+ sf_bs_sz = bs->size > VP9_SUPER_FRAME_BS_SZ ?
+ VP9_SUPER_FRAME_BS_SZ : bs->size;
+ sf_bs_off = VP9_SUPER_FRAME_BS_SZ - sf_bs_sz;
+ sf_bs_src = bs->va + bs->size - sf_bs_sz;
+ sf_bs_dst = vsi->sf_bs_buf + sf_bs_off;
+ memcpy(sf_bs_dst, sf_bs_src, sf_bs_sz);
+ } else {
+ if ((vsi->sf_frm_cnt > 0) &&
+ (vsi->sf_frm_idx < vsi->sf_frm_cnt)) {
+ unsigned int idx = vsi->sf_frm_idx;
+
+ memcpy((void *)bs->va,
+ (void *)(bs->va +
+ vsi->sf_frm_offset[idx]),
+ vsi->sf_frm_sz[idx]);
+ }
+ }
+ ret = vpu_dec_start(&inst->vpu, data, 3);
+ if (ret) {
+ mtk_vcodec_err(inst, "vpu_dec_start failed");
+ goto DECODE_ERROR;
+ }
+
+ if (vsi->resolution_changed) {
+ if (!vp9_alloc_work_buf(inst)) {
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+ }
+
+ if (vsi->sf_frm_cnt > 0) {
+ cur_fb = &vsi->sf_ref_fb[vsi->sf_next_ref_fb_idx].fb;
+
+ if (vsi->sf_frm_idx < vsi->sf_frm_cnt)
+ inst->cur_fb = cur_fb;
+ else
+ inst->cur_fb = fb;
+ } else {
+ inst->cur_fb = fb;
+ }
+
+ vsi->frm_bufs[vsi->new_fb_idx].buf.fb = inst->cur_fb;
+ if (!vp9_is_sf_ref_fb(inst, inst->cur_fb))
+ vp9_add_to_fb_use_list(inst, inst->cur_fb);
+
+ mtk_vcodec_debug(inst, "[#pic %d]", vsi->frm_num);
+
+ if (vsi->show_existing_frame)
+ mtk_vcodec_debug(inst,
+ "drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
+ VP9_MAX_FRM_BUF_NUM)) {
+ mtk_vcodec_err(inst,
+ "Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+ vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
+ vsi->frm_to_show_idx);
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+ /* The VPU assigns the buffer pointer in its own address space;
+ * reassign it here.
+ */
+ for (i = 0; i < ARRAY_SIZE(vsi->frm_refs); i++) {
+ unsigned int idx = vsi->frm_refs[i].idx;
+
+ vsi->frm_refs[i].buf = &vsi->frm_bufs[idx].buf;
+ }
+
+ if (vsi->resolution_changed) {
+ *res_chg = true;
+ mtk_vcodec_debug(inst, "VDEC_ST_RESOLUTION_CHANGED");
+
+ ret = 0;
+ goto DECODE_ERROR;
+ }
+
+ if (vp9_decode_end_proc(inst) != true) {
+ mtk_vcodec_err(inst, "vp9_decode_end_proc");
+ ret = -EINVAL;
+ goto DECODE_ERROR;
+ }
+
+ if (vp9_is_last_sub_frm(inst))
+ break;
+
+ }
+ inst->total_frm_cnt++;
+
+DECODE_ERROR:
+ if (ret < 0)
+ vp9_add_to_fb_free_list(inst, fb);
+
+ return ret;
+}
+
+static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = 0;
+ cr->top = 0;
+ cr->width = inst->vsi->pic_w;
+ cr->height = inst->vsi->pic_h;
+ mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp9_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+ int ret = 0;
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ get_disp_fb(inst, out);
+ break;
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ get_free_fb(inst, out);
+ break;
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+ case GET_PARAM_DPB_SIZE:
+ *((unsigned int *)out) = MAX_VP9_DPB_SIZE;
+ break;
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+ default:
+ mtk_vcodec_err(inst, "not supported param type %d", type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct vdec_common_if vdec_vp9_if = {
+ vdec_vp9_init,
+ vdec_vp9_decode,
+ vdec_vp9_get_param,
+ vdec_vp9_deinit,
+};
+
+struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+struct vdec_common_if *get_vp9_dec_comm_if(void)
+{
+ return &vdec_vp9_if;
+}
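
A compact lifetime sketch of how the frame-buffer lists declared in struct vdec_vp9_inst are intended to be used, built only from the helpers defined above; example_fb_flow() is purely hypothetical and simply strings those helpers together in the order the driver applies them:

static void example_fb_flow(struct vdec_vp9_inst *inst, struct vdec_fb *fb)
{
	/* fb picked as the current decode output: track it on fb_use_list */
	vp9_add_to_fb_use_list(inst, fb);

	/* the frame became displayable: queue it on fb_disp_list */
	vp9_add_to_fb_disp_list(inst, fb);

	/* no longer referenced: move it from fb_use_list to fb_free_list */
	vp9_add_to_fb_free_list(inst,
				vp9_rm_from_fb_use_list(inst, fb->base_y.va));
}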
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
new file mode 100644
index 000000000000..7e4c1a92bbd8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_BASE_
+#define _VDEC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "vdec_drv_if.h"
+
+struct vdec_common_if {
+ /**
+ * (*init)() - initialize decode driver
+ * @ctx : [in] mtk v4l2 context
+ * @h_vdec : [out] driver handle
+ */
+ int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
+
+ /**
+ * (*decode)() - trigger decode
+ * @h_vdec : [in] driver handle
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store decoded frame
+ * @res_chg : [out] resolution change happen
+ */
+ int (*decode)(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
+ /**
+ * (*get_param)() - get driver's parameter
+ * @h_vdec : [in] driver handle
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+ int (*get_param)(unsigned long h_vdec, enum vdec_get_param_type type,
+ void *out);
+
+ /**
+ * (*deinit)() - deinitialize driver.
+ * @h_vdec : [in] driver handle to be deinit
+ */
+ void (*deinit)(unsigned long h_vdec);
+};
+
+#endif
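
Illustration only, not part of the patch: a hypothetical codec back-end hooks into this interface by filling a struct vdec_common_if with its callbacks, just as vdec_vp9_if does in the VP9 file above; the vdec_foo_* names are placeholders.

/* hypothetical back-end callbacks, implemented elsewhere */
int vdec_foo_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
int vdec_foo_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
		    struct vdec_fb *fb, bool *res_chg);
int vdec_foo_get_param(unsigned long h_vdec, enum vdec_get_param_type type,
		       void *out);
void vdec_foo_deinit(unsigned long h_vdec);

static struct vdec_common_if vdec_foo_if = {
	.init		= vdec_foo_init,
	.decode		= vdec_foo_decode,
	.get_param	= vdec_foo_get_param,
	.deinit		= vdec_foo_deinit,
};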
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
new file mode 100644
index 000000000000..5ffc468dd910
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec.h"
+#include "vdec_drv_base.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vpu.h"
+
+const struct vdec_common_if *get_h264_dec_comm_if(void);
+const struct vdec_common_if *get_vp8_dec_comm_if(void);
+const struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+ int ret = 0;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_H264:
+ ctx->dec_if = get_h264_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP8:
+ ctx->dec_if = get_vp8_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_VP9:
+ ctx->dec_if = get_vp9_dec_comm_if();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ int ret = 0;
+
+ if (bs) {
+ if ((bs->dma_addr & 63) != 0) {
+ mtk_v4l2_err("bs dma_addr should 64 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (fb) {
+ if (((fb->base_y.dma_addr & 511) != 0) ||
+ ((fb->base_c.dma_addr & 511) != 0)) {
+ mtk_v4l2_err("frame buffer dma_addr should 512 byte align");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+
+ mtk_vcodec_set_curr_ctx(ctx->dev, ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ enable_irq(ctx->dev->dec_irq);
+ ret = ctx->dec_if->decode(ctx->drv_handle, bs, fb, res_chg);
+ disable_irq(ctx->dev->dec_irq);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vcodec_set_curr_ctx(ctx->dev, NULL);
+
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out)
+{
+ int ret = 0;
+
+ if (ctx->drv_handle == 0)
+ return -EIO;
+
+ mtk_vdec_lock(ctx);
+ ret = ctx->dec_if->get_param(ctx->drv_handle, type, out);
+ mtk_vdec_unlock(ctx);
+
+ return ret;
+}
+
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+ if (ctx->drv_handle == 0)
+ return;
+
+ mtk_vdec_lock(ctx);
+ mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+ ctx->dec_if->deinit(ctx->drv_handle);
+ mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+ mtk_vdec_unlock(ctx);
+
+ ctx->drv_handle = 0;
+}
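
A rough caller-side sketch, assuming vdec_if_init() has already succeeded; the real flow lives in mtk_vcodec_dec.c (not in this hunk), and example_handle_one_bs() exists only to show how vdec_if_decode() and GET_PARAM_DISP_FRAME_BUFFER are expected to be combined:

static int example_handle_one_bs(struct mtk_vcodec_ctx *ctx,
				 struct mtk_vcodec_mem *bs,
				 struct vdec_fb *fb)
{
	struct vdec_fb *disp_fb;
	bool res_chg = false;
	int ret;

	ret = vdec_if_decode(ctx, bs, fb, &res_chg);
	if (ret)
		return ret;

	if (res_chg)
		return 0;	/* caller re-negotiates CAPTURE buffers first */

	/* drain the frames that became displayable during this decode */
	do {
		disp_fb = NULL;
		vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER, &disp_fb);
		/* the real driver would hand disp_fb back to vb2 here */
	} while (disp_fb);

	return 0;
}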
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
new file mode 100644
index 000000000000..db6b5205ffb1
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_IF_H_
+#define _VDEC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_util.h"
+
+
+/**
+ * struct vdec_fb_status - decoder frame buffer status
+ * @FB_ST_NORMAL : initial state
+ * @FB_ST_DISPLAY : frame buffer is ready to be displayed
+ * @FB_ST_FREE : frame buffer is not used by decoder any more
+ */
+enum vdec_fb_status {
+ FB_ST_NORMAL = 0,
+ FB_ST_DISPLAY = (1 << 0),
+ FB_ST_FREE = (1 << 1)
+};
+
+/* For GET_PARAM_DISP_FRAME_BUFFER and GET_PARAM_FREE_FRAME_BUFFER,
+ * the caller does not own the returned buffer. The buffer will not be
+ * released before vdec_if_deinit.
+ * GET_PARAM_DISP_FRAME_BUFFER : get next displayable frame buffer,
+ * struct vdec_fb**
+ * GET_PARAM_FREE_FRAME_BUFFER : get non-referenced framebuffer, vdec_fb**
+ * GET_PARAM_PIC_INFO : get picture info, struct vdec_pic_info*
+ * GET_PARAM_CROP_INFO : get crop info, struct v4l2_crop*
+ * GET_PARAM_DPB_SIZE : get dpb size, unsigned int*
+ */
+enum vdec_get_param_type {
+ GET_PARAM_DISP_FRAME_BUFFER,
+ GET_PARAM_FREE_FRAME_BUFFER,
+ GET_PARAM_PIC_INFO,
+ GET_PARAM_CROP_INFO,
+ GET_PARAM_DPB_SIZE
+};
+
+/**
+ * struct vdec_fb_node - decoder frame buffer node
+ * @list : list to hold this node
+ * @fb : points to a frame buffer (vdec_fb); fb may point to either a frame
+ * buffer or a working buffer, used to maintain buffers in different states
+ */
+struct vdec_fb_node {
+ struct list_head list;
+ struct vdec_fb *fb;
+};
+
+/**
+ * vdec_if_init() - initialize decode driver
+ * @ctx : [in] v4l2 context
+ * @fourcc : [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9..
+ */
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/**
+ * vdec_if_deinit() - deinitialize decode driver
+ * @ctx : [in] v4l2 context
+ *
+ */
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/**
+ * vdec_if_decode() - trigger decode
+ * @ctx : [in] v4l2 context
+ * @bs : [in] input bitstream
+ * @fb : [in] frame buffer to store the decoded frame; when NULL it means
+ * parse header only
+ * @res_chg : [out] set when a resolution change happens, i.e. the current bs
+ * has a different picture width/height
+ * Note: To flush the decoder when reaching EOF, set input bitstream as NULL.
+ */
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg);
+
+/**
+ * vdec_if_get_param() - get driver's parameter
+ * @ctx : [in] v4l2 context
+ * @type : [in] input parameter type
+ * @out : [out] buffer to store query result
+ */
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+ void *out);
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
new file mode 100644
index 000000000000..5a8a629f4ac9
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_IPI_MSG_H_
+#define _VDEC_IPI_MSG_H_
+
+/**
+ * enum vdec_ipi_msgid - message id between AP and VPU
+ * @AP_IPIMSG_XXX : AP to VPU cmd message id
+ * @VPU_IPIMSG_XXX_ACK : VPU ack AP cmd message id
+ */
+enum vdec_ipi_msgid {
+ AP_IPIMSG_DEC_INIT = 0xA000,
+ AP_IPIMSG_DEC_START = 0xA001,
+ AP_IPIMSG_DEC_END = 0xA002,
+ AP_IPIMSG_DEC_DEINIT = 0xA003,
+ AP_IPIMSG_DEC_RESET = 0xA004,
+
+ VPU_IPIMSG_DEC_INIT_ACK = 0xB000,
+ VPU_IPIMSG_DEC_START_ACK = 0xB001,
+ VPU_IPIMSG_DEC_END_ACK = 0xB002,
+ VPU_IPIMSG_DEC_DEINIT_ACK = 0xB003,
+ VPU_IPIMSG_DEC_RESET_ACK = 0xB004,
+};
+
+/**
+ * struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_ap_ipi_cmd {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct vdec_vpu_ipi_ack - generic VPU to AP ipi command format
+ * @msg_id : vdec_ipi_msgid
+ * @status : VPU execution result
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_vpu_ipi_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_init - for AP_IPIMSG_DEC_INIT
+ * @msg_id : AP_IPIMSG_DEC_INIT
+ * @reserved : Reserved field
+ * @ap_inst_addr : AP video decoder instance address
+ */
+struct vdec_ap_ipi_init {
+ uint32_t msg_id;
+ uint32_t reserved;
+ uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
+ * @msg_id : AP_IPIMSG_DEC_START
+ * @vpu_inst_addr : VPU decoder instance address
+ * @data : Header info
+ * H264 decoder [0]:buf_sz [1]:nal_start
+ * VP8 decoder [0]:width/height
+ * VP9 decoder [0]:profile, [1][2] width/height
+ * @reserved : Reserved field
+ */
+struct vdec_ap_ipi_dec_start {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t data[3];
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_vpu_ipi_init_ack - for VPU_IPIMSG_DEC_INIT_ACK
+ * @msg_id : VPU_IPIMSG_DEC_INIT_ACK
+ * @status : VPU execution result
+ * @ap_inst_addr : AP vdec_vpu_inst instance address
+ * @vpu_inst_addr : VPU decoder instance address
+ */
+struct vdec_vpu_ipi_init_ack {
+ uint32_t msg_id;
+ int32_t status;
+ uint64_t ap_inst_addr;
+ uint32_t vpu_inst_addr;
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
new file mode 100644
index 000000000000..5a24c51aebb7
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_ipi_msg.h"
+#include "vdec_vpu_if.h"
+
+static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
+{
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+
+ /* mapping VPU address to kernel virtual address */
+ /* the content in vsi is initialized to 0 in VPU */
+ vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+ vpu->inst_addr = msg->vpu_inst_addr;
+
+ mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+}
+
+/*
+ * This function runs in interrupt context, which means an IPI message has
+ * arrived from the VPU.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct vdec_vpu_ipi_ack *msg = data;
+ struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+ (unsigned long)msg->ap_inst_addr;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+
+ if (msg->status == 0) {
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_DEC_INIT_ACK:
+ handle_init_ack_msg(data);
+ break;
+
+ case VPU_IPIMSG_DEC_START_ACK:
+ case VPU_IPIMSG_DEC_END_ACK:
+ case VPU_IPIMSG_DEC_DEINIT_ACK:
+ case VPU_IPIMSG_DEC_RESET_ACK:
+ break;
+
+ default:
+ mtk_vcodec_err(vpu, "invalid msg=%X", msg->msg_id);
+ break;
+ }
+ }
+
+ mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+}
+
+static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
+{
+ int err;
+ uint32_t msg_id = *(uint32_t *)msg;
+
+ mtk_vcodec_debug(vpu, "id=%X", msg_id);
+
+ vpu->failure = 0;
+ vpu->signaled = 0;
+
+ err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ if (err) {
+ mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
+ vpu->id, msg_id, err);
+ return err;
+ }
+
+ return vpu->failure;
+}
+
+static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
+{
+ struct vdec_ap_ipi_cmd msg;
+ int err = 0;
+
+ mtk_vcodec_debug(vpu, "+ id=%X", msg_id);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = msg_id;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
+ return err;
+}
+
+int vpu_dec_init(struct vdec_vpu_inst *vpu)
+{
+ struct vdec_ap_ipi_init msg;
+ int err;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ init_waitqueue_head(&vpu->wq);
+
+ err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
+ if (err != 0) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
+ return err;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_INIT;
+ msg.ap_inst_addr = (unsigned long)vpu;
+
+ mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
+{
+ struct vdec_ap_ipi_dec_start msg;
+ int i;
+ int err = 0;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ if (len > ARRAY_SIZE(msg.data)) {
+ mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_id = AP_IPIMSG_DEC_START;
+ msg.vpu_inst_addr = vpu->inst_addr;
+
+ for (i = 0; i < len; i++)
+ msg.data[i] = data[i];
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+ mtk_vcodec_debug(vpu, "- ret=%d", err);
+ return err;
+}
+
+int vpu_dec_end(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_END);
+}
+
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_DEINIT);
+}
+
+int vpu_dec_reset(struct vdec_vpu_inst *vpu)
+{
+ return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_RESET);
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
new file mode 100644
index 000000000000..0dc9ed01fffe
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_VPU_IF_H_
+#define _VDEC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+
+/**
+ * struct vdec_vpu_inst - VPU instance for video codec
+ * @id : ipi id for each decoder
+ * @vsi : driver structure allocated by the VPU side and shared with the AP
+ * side for control and information sharing
+ * @failure : VPU execution result status, 0: success, others: fail
+ * @inst_addr : VPU decoder instance address
+ * @signaled : 1 - Host has received ack message from VPU, 0 - not received
+ * @ctx : context for v4l2 layer integration
+ * @dev : platform device of VPU
+ * @wq : wait queue to wait for the VPU message ack
+ * @handler : ipi handler for each decoder
+ */
+struct vdec_vpu_inst {
+ enum ipi_id id;
+ void *vsi;
+ int32_t failure;
+ uint32_t inst_addr;
+ unsigned int signaled;
+ struct mtk_vcodec_ctx *ctx;
+ struct platform_device *dev;
+ wait_queue_head_t wq;
+ ipi_handler_t handler;
+};
+
+/**
+ * vpu_dec_init - init decoder instance and allocate the required resources in
+ * the VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_init(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_start - start decoding; the function is invoked once for every
+ * frame.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ * @data: meta data to pass bitstream info to VPU decoder
+ * @len : meta data length
+ */
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len);
+
+/**
+ * vpu_dec_end - end decoding; the function is invoked once the HW decoding
+ * done interrupt has been received successfully. The decoder in
+ * the VPU will continue to do reference frame management and
+ * check if there is a new decoded frame available to display.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_end(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_deinit - deinit the decoder instance and free its resources in the
+ * VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_reset - reset the decoder; used to flush the decoder at end of
+ * stream or when seeking. Remaining non-displayed frames will
+ * be pushed to display.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_reset(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data which is passed by the decoder at registration
+ * time.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
+
+#endif
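
Intended usage sketch, mirroring vdec_vp9_init() earlier in this patch rather than adding any new API; example_vpu_setup() is hypothetical and only shows which vdec_vpu_inst fields a back-end is expected to fill before calling vpu_dec_init():

static int example_vpu_setup(struct vdec_vpu_inst *vpu,
			     struct mtk_vcodec_ctx *ctx)
{
	vpu->id = IPI_VDEC_VP9;			/* IPI channel of this codec */
	vpu->dev = ctx->dev->vpu_plat_dev;	/* VPU platform device */
	vpu->ctx = ctx;
	vpu->handler = vpu_dec_ipi_handler;	/* ack handler declared above */

	/* registers the handler and sends AP_IPIMSG_DEC_INIT to the VPU */
	return vpu_dec_init(vpu);
}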
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c9bf58c97878..463b69c934be 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -134,6 +134,8 @@ struct vpu_wdt {
*
* @signaled: the signal of vpu initialization completed
* @fw_ver: VPU firmware version
+ * @dec_capability: decoder capability which is not used for now and
+ * the value is reserved for future use
* @enc_capability: encoder capability which is not used for now and
* the value is reserved for future use
* @wq: wait queue for VPU initialization status
@@ -141,6 +143,7 @@ struct vpu_wdt {
struct vpu_run {
u32 signaled;
char fw_ver[VPU_FW_VER_LEN];
+ unsigned int dec_capability;
unsigned int enc_capability;
wait_queue_head_t wq;
};
@@ -415,6 +418,14 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ return vpu->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
+
unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
{
struct mtk_vpu *vpu = platform_get_drvdata(pdev);
@@ -523,9 +534,9 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
int vpu_load_firmware(struct platform_device *pdev)
{
- struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct mtk_vpu *vpu;
struct device *dev = &pdev->dev;
- struct vpu_run *run = &vpu->run;
+ struct vpu_run *run;
const struct firmware *vpu_fw = NULL;
int ret;
@@ -534,6 +545,9 @@ int vpu_load_firmware(struct platform_device *pdev)
return -EINVAL;
}
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
mutex_lock(&vpu->vpu_mutex);
if (vpu->fw_loaded) {
mutex_unlock(&vpu->vpu_mutex);
@@ -600,6 +614,7 @@ static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
vpu->run.signaled = run->signaled;
strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+ vpu->run.dec_capability = run->dec_capability;
vpu->run.enc_capability = run->enc_capability;
wake_up_interruptible(&vpu->run.wq);
}
@@ -674,7 +689,7 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
GFP_KERNEL);
if (!vpu->extmem[fw_type].va) {
dev_err(dev, "Failed to allocate the extended program memory\n");
- return PTR_ERR(vpu->extmem[fw_type].va);
+ return -ENOMEM;
}
/* Disable extend0. Enable extend1 */
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
index 5ab37f04bdfd..aec0268be3d0 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -31,23 +31,41 @@ typedef void (*ipi_handler_t) (void *data,
* enum ipi_id - the id of inter-processor interrupt
*
* @IPI_VPU_INIT: The interrupt from vpu is to notfiy kernel
- VPU initialization completed.
- IPI_VPU_INIT is sent from VPU when firmware is
- loaded. AP doesn't need to send IPI_VPU_INIT
- command to VPU.
- For other IPI below, AP should send the request
- to VPU to trigger the interrupt.
+ * VPU initialization completed.
+ * IPI_VPU_INIT is sent from VPU when firmware is
+ * loaded. AP doesn't need to send IPI_VPU_INIT
+ * command to VPU.
+ * For other IPI below, AP should send the request
+ * to VPU to trigger the interrupt.
+ * @IPI_VDEC_H264: The interrupt from vpu is to notify kernel to
+ * handle H264 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP8: The interrupt from vpu is to notify kernel to
+ * handle VP8 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
+ * @IPI_VDEC_VP9: The interrupt from vpu is to notify kernel to
+ * handle VP9 video decoder job, and vice versa.
+ * Decode output format is always MT21 no matter what
+ * the input format is.
* @IPI_VENC_H264: The interrupt from vpu is to notify kernel to
- handle H264 video encoder job, and vice versa.
+ * handle H264 video encoder job, and vice versa.
* @IPI_VENC_VP8: The interrupt fro vpu is to notify kernel to
- handle VP8 video encoder job,, and vice versa.
+ * handle VP8 video encoder job, and vice versa.
+ * @IPI_MDP: The interrupt from vpu is to notify kernel to
+ * handle MDP (Media Data Path) job, and vice versa.
* @IPI_MAX: The maximum IPI number
*/
enum ipi_id {
IPI_VPU_INIT = 0,
+ IPI_VDEC_H264,
+ IPI_VDEC_VP8,
+ IPI_VDEC_VP9,
IPI_VENC_H264,
IPI_VENC_VP8,
+ IPI_MDP,
IPI_MAX,
};
@@ -55,10 +73,14 @@ enum ipi_id {
* enum rst_id - reset id to register reset function for VPU watchdog timeout
*
* @VPU_RST_ENC: encoder reset id
+ * @VPU_RST_DEC: decoder reset id
+ * @VPU_RST_MDP: MDP (Media Data Path) reset id
* @VPU_RST_MAX: maximum reset id
*/
enum rst_id {
VPU_RST_ENC,
+ VPU_RST_DEC,
+ VPU_RST_MDP,
VPU_RST_MAX,
};
@@ -125,6 +147,16 @@ struct platform_device *vpu_get_plat_device(struct platform_device *pdev);
int vpu_wdt_reg_handler(struct platform_device *pdev,
void vpu_wdt_reset_func(void *),
void *priv, enum rst_id id);
+
+/**
+ * vpu_get_vdec_hw_capa - get video decoder hardware capability
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev);
+
/**
* vpu_get_venc_hw_capa - get video encoder hardware capability
*
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index e68d271b10af..03e47e0f778d 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -724,10 +724,10 @@ static int emmaprp_buf_prepare(struct vb2_buffer *vb)
q_data = get_q_data(ctx, vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
- dprintk(ctx->dev, "%s data will not fit into plane"
- "(%lu < %lu)\n", __func__,
- vb2_plane_size(vb, 0),
- (long)q_data->sizeimage);
+ dprintk(ctx->dev,
+ "%s data will not fit into plane(%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
return -EINVAL;
}
@@ -937,7 +937,7 @@ static int emmaprp_probe(struct platform_device *pdev)
snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
pcdev->vfd = vfd;
v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
- " Device registered as /dev/video%d\n", vfd->num);
+ " Device registered as /dev/video%d\n", vfd->num);
platform_set_drvdata(pdev, pcdev);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index a31b95cb3b09..4d29860d27b4 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -408,8 +408,8 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
"%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
- "out_height=%d rotation_type=%d screen_width=%d\n",
- __func__, ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
+ "out_height=%d rotation_type=%d screen_width=%d\n", __func__,
+ ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
info.color_mode, info.rotation, info.mirror, info.pos_x,
info.pos_y, info.out_width, info.out_height, info.rotation_type,
info.screen_width);
@@ -791,7 +791,8 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
size, DMA_TO_DEVICE);
if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
- v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "dma_map_single failed\n");
vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
}
@@ -1657,8 +1658,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
/* Turn of the pipeline */
ret = omapvid_apply_changes(vout);
if (ret)
- v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
- " streamoff\n");
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to change mode in streamoff\n");
INIT_LIST_HEAD(&vout->dma_queue);
ret = videobuf_streamoff(&vout->vbq);
@@ -1858,8 +1859,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vfd = vout->vfd = video_device_alloc();
if (!vfd) {
- printk(KERN_ERR VOUT_NAME ": could not allocate"
- " video device struct\n");
+ printk(KERN_ERR VOUT_NAME
+ ": could not allocate video device struct\n");
v4l2_ctrl_handler_free(hdl);
return -ENOMEM;
}
@@ -1984,16 +1985,17 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
*/
vfd = vout->vfd;
if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
- dev_err(&pdev->dev, ": Could not register "
- "Video for Linux device\n");
+ dev_err(&pdev->dev,
+ ": Could not register Video for Linux device\n");
vfd->minor = -1;
ret = -ENODEV;
goto error2;
}
video_set_drvdata(vfd, vout);
- dev_info(&pdev->dev, ": registered and initialized"
- " video device %d\n", vfd->minor);
+ dev_info(&pdev->dev,
+ ": registered and initialized video device %d\n",
+ vfd->minor);
if (k == (pdev->num_resources - 1))
return 0;
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index b8638e4e1627..92c4e1826356 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -139,8 +139,9 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
if (ret < 0) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
- " video%d\n", vfd->minor);
+ dev_info(&pdev->dev,
+ ": failed to allocate DMA Channel for video%d\n",
+ vfd->minor);
}
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0321d84addc7..084ecf4aa9a4 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -480,8 +480,8 @@ void omap3isp_hist_dma_done(struct isp_device *isp)
omap3isp_stat_pcr_busy(&isp->isp_hist)) {
/* Histogram cannot be enabled in this frame anymore */
atomic_set(&isp->isp_hist.buf_err, 1);
- dev_dbg(isp->dev, "hist: Out of synchronization with "
- "CCDC. Ignoring next buffer.\n");
+ dev_dbg(isp->dev,
+ "hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
}
}
@@ -2117,23 +2117,18 @@ static int isp_of_parse_nodes(struct device *dev,
struct isp_async_subdev *isd;
isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
- if (!isd) {
- of_node_put(node);
- return -ENOMEM;
- }
+ if (!isd)
+ goto error;
notifier->subdevs[notifier->num_subdevs] = &isd->asd;
- if (isp_of_parse_node(dev, node, isd)) {
- of_node_put(node);
- return -EINVAL;
- }
+ if (isp_of_parse_node(dev, node, isd))
+ goto error;
isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
- of_node_put(node);
if (!isd->asd.match.of.node) {
dev_warn(dev, "bad remote port parent\n");
- return -EINVAL;
+ goto error;
}
isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
@@ -2141,6 +2136,10 @@ static int isp_of_parse_nodes(struct device *dev,
}
return notifier->num_subdevs;
+
+error:
+ of_node_put(node);
+ return -EINVAL;
}
static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 882310eb45cc..7207558d722c 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -151,8 +151,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
}
if (lsc_cfg->offset & 3) {
- dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
- "4\n");
+ dev_dbg(isp->dev,
+ "CCDC: LSC: Offset must be a multiple of 4\n");
return -EINVAL;
}
@@ -416,8 +416,9 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
return 0;
if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
- dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
- "need to be supplied\n", __func__);
+ dev_dbg(to_device(ccdc),
+ "%s: Both LSC configuration and table need to be supplied\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index f75a1be29d84..7dae2fe0d42d 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -753,8 +753,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
ISPCSI2_PHY_IRQSTATUS);
isp_reg_writel(isp, cpxio1_irqstatus,
csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
- dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
- "%x\n", cpxio1_irqstatus);
+ dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
+ cpxio1_irqstatus);
pipe->error = true;
}
@@ -763,13 +763,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
- dev_dbg(isp->dev, "CSI2 Err:"
- " OCP:%d,"
- " Short_pack:%d,"
- " ECC:%d,"
- " CPXIO2:%d,"
- " FIFO_OVF:%d,"
- "\n",
+ dev_dbg(isp->dev,
+ "CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
(csi2_irqstatus &
ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
(csi2_irqstatus &
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 495447d66cfd..871d4fe09c7f 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -267,8 +267,8 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
int rval;
if (phy->vdd == NULL) {
- dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
- "available\n");
+ dev_err(phy->isp->dev,
+ "Power regulator for CSI PHY not available\n");
return -ENODEV;
}
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index ccaf92f39236..d44626f20ac6 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -304,8 +304,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
GFP_KERNEL);
if (!aewb_recover_cfg) {
- dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
- "recover configuration.\n");
+ dev_err(aewb->isp->dev,
+ "AEWB: cannot allocate memory for recover configuration.\n");
return -ENOMEM;
}
@@ -321,8 +321,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
- dev_err(aewb->isp->dev, "AEWB: recover configuration is "
- "invalid.\n");
+ dev_err(aewb->isp->dev,
+ "AEWB: recover configuration is invalid.\n");
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 92937f7eecef..99bd6cc21d86 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -367,8 +367,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
GFP_KERNEL);
if (!af_recover_cfg) {
- dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
- "configuration.\n");
+ dev_err(af->isp->dev,
+ "AF: cannot allocate memory for recover configuration.\n");
return -ENOMEM;
}
@@ -379,8 +379,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
if (h3a_af_validate_params(af, af_recover_cfg)) {
- dev_err(af->isp->dev, "AF: recover configuration is "
- "invalid.\n");
+ dev_err(af->isp->dev,
+ "AF: recover configuration is invalid.\n");
return -EINVAL;
}
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 7138b043a4aa..a4ed5d140d48 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -486,27 +485,30 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->isp = isp;
if (HIST_CONFIG_DMA) {
- struct platform_device *pdev = to_platform_device(isp->dev);
- struct resource *res;
- unsigned int sig = 0;
dma_cap_mask_t mask;
+ /*
+ * We need slave capable channel without DMA request line for
+ * reading out the data.
+ * For this we can use dma_request_chan_by_mask() as we are
+ * happy with any channel as long as it is capable of slave
+ * configuration.
+ */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
+ hist->dma_ch = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(hist->dma_ch)) {
+ ret = PTR_ERR(hist->dma_ch);
+ if (ret == -EPROBE_DEFER)
+ return ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- "hist");
- if (res)
- sig = res->start;
-
- hist->dma_ch = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, isp->dev, "hist");
- if (!hist->dma_ch)
+ hist->dma_ch = NULL;
dev_warn(isp->dev,
"hist: DMA channel request failed, using PIO\n");
- else
+ } else {
dev_dbg(isp->dev, "hist: using DMA channel %s\n",
dma_chan_name(hist->dma_ch));
+ }
}
hist->ops = &hist_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 1b9217d3b1b6..47cbc7e3d825 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -113,8 +113,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
ret = 0;
if (ret) {
- dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
- "match.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: beginning magic check does not match.\n",
+ stat->subdev.name);
return ret;
}
@@ -122,8 +123,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
w < end; w++) {
if (unlikely(*w != MAGIC_NUM)) {
- dev_dbg(stat->isp->dev, "%s: ending magic check does "
- "not match.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: ending magic check does not match.\n",
+ stat->subdev.name);
return -EINVAL;
}
}
@@ -256,9 +258,9 @@ static void isp_stat_buf_next(struct ispstat *stat)
{
if (unlikely(stat->active_buf))
/* Overwriting unused active buffer */
- dev_dbg(stat->isp->dev, "%s: new buffer requested without "
- "queuing active one.\n",
- stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: new buffer requested without queuing active one.\n",
+ stat->subdev.name);
else
stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
}
@@ -292,8 +294,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
return ERR_PTR(-EBUSY);
}
if (isp_stat_buf_check_magic(stat, buf)) {
- dev_dbg(stat->isp->dev, "%s: current buffer has "
- "corrupted data\n.", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: current buffer has corrupted data\n.",
+ stat->subdev.name);
/* Mark empty because it doesn't have valid data. */
buf->empty = 1;
} else {
@@ -307,8 +310,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
if (buf->buf_size > data->buf_size) {
- dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
- "not enough.\n", stat->subdev.name);
+ dev_warn(stat->isp->dev,
+ "%s: userspace's buffer size is not enough.\n",
+ stat->subdev.name);
isp_stat_buf_release(stat);
return ERR_PTR(-EINVAL);
}
@@ -531,20 +535,22 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
mutex_lock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
- "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
+ dev_dbg(stat->isp->dev,
+ "%s: configuring module with buffer size=0x%08lx\n",
+ stat->subdev.name, (unsigned long)buf_size);
ret = stat->ops->validate_params(stat, new_conf);
if (ret) {
mutex_unlock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: configuration values are "
- "invalid.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
+ stat->subdev.name);
return ret;
}
if (buf_size != user_cfg->buf_size)
- dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
- "request to 0x%08lx\n", stat->subdev.name,
+ dev_dbg(stat->isp->dev,
+ "%s: driver has corrected buffer size request to 0x%08lx\n",
+ stat->subdev.name,
(unsigned long)user_cfg->buf_size);
/*
@@ -595,8 +601,9 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
/* Module has a valid configuration. */
stat->configured = 1;
- dev_dbg(stat->isp->dev, "%s: module has been successfully "
- "configured.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: module has been successfully configured.\n",
+ stat->subdev.name);
mutex_unlock(&stat->ioctl_lock);
@@ -762,8 +769,8 @@ int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
if (!stat->configured && enable) {
spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
mutex_unlock(&stat->ioctl_lock);
- dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
- "never been successfully configured so far.\n",
+ dev_dbg(stat->isp->dev,
+ "%s: cannot enable module as it's never been successfully configured so far.\n",
stat->subdev.name);
return -EINVAL;
}
@@ -859,8 +866,8 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
if (stat->state == ISPSTAT_ENABLED) {
spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
dev_err(stat->isp->dev,
- "%s: interrupt occurred when module was still "
- "processing a buffer.\n", stat->subdev.name);
+ "%s: interrupt occurred when module was still processing a buffer.\n",
+ stat->subdev.name);
ret = STAT_NO_BUF;
goto out;
} else {
@@ -964,8 +971,9 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
atomic_set(&stat->buf_err, 1);
ret = STAT_NO_BUF;
- dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
- "device is busy.\n", stat->subdev.name);
+ dev_dbg(stat->isp->dev,
+ "%s: cannot process buffer, device is busy.\n",
+ stat->subdev.name);
}
out:
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index c12209c701d3..929006f65cc7 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2125,17 +2125,22 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
pix->bytesperline, pix->height);
pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
- err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+
+ err = sensor_call(pcdev, core, s_power, 1);
if (err)
goto out;
+ err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (err)
+ goto out_sensor_poweroff;
+
v4l2_fill_pix_format(pix, mf);
pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
__func__, pix->colorspace, pix->pixelformat);
err = pxa_camera_init_videobuf2(pcdev);
if (err)
- goto out;
+ goto out_sensor_poweroff;
err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
if (err) {
@@ -2146,6 +2151,9 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
"PXA Camera driver attached to camera %s\n",
subdev->name);
}
+
+out_sensor_poweroff:
+ err = sensor_call(pcdev, core, s_power, 0);
out:
mutex_unlock(&pcdev->mlock);
return err;
@@ -2347,8 +2355,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
* Platform hasn't set available data widths. This is bad.
* Warn and use a default.
*/
- dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
- "data widths, using default 10 bit\n");
+ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available data widths, using default 10 bit\n");
pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
}
if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
@@ -2359,8 +2366,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->width_flags |= 1 << 9;
if (!pcdev->mclk) {
dev_warn(&pdev->dev,
- "mclk == 0! Please, fix your platform data. "
- "Using default 20MHz\n");
+ "mclk == 0! Please, fix your platform data. Using default 20MHz\n");
pcdev->mclk = 20000000;
}
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index f3a3f31cdfa9..7146fc5ef168 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -169,6 +169,7 @@ static const struct of_device_id rcar_fcp_of_match[] = {
{ .compatible = "renesas,fcpv" },
{ },
};
+MODULE_DEVICE_TABLE(of, rcar_fcp_of_match);
static struct platform_driver rcar_fcp_platform_driver = {
.probe = rcar_fcp_probe,
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
new file mode 100644
index 000000000000..674cc1309b43
--- /dev/null
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -0,0 +1,2445 @@
+/*
+ * Renesas RCar Fine Display Processor
+ *
+ * Video format converter and frame deinterlacer device.
+ *
+ * Author: Kieran Bingham, <kieran@bingham.xyz>
+ * Copyright (c) 2016 Renesas Electronics Corporation.
+ *
+ * This code was developed from and inspired by the vim2m, rcar_jpu,
+ * m2m-deinterlace, and vsp1 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/rcar-fcp.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activate debug info");
+
+/* Minimum and maximum frame width/height */
+#define FDP1_MIN_W 80U
+#define FDP1_MIN_H 80U
+
+#define FDP1_MAX_W 3840U
+#define FDP1_MAX_H 2160U
+
+#define FDP1_MAX_PLANES 3U
+#define FDP1_MAX_STRIDE 8190U
+
+/* Flags that indicate a format can be used for capture/output */
+#define FDP1_CAPTURE BIT(0)
+#define FDP1_OUTPUT BIT(1)
+
+#define DRIVER_NAME "rcar_fdp1"
+
+/* Number of jobs to have available on the processing queue */
+#define FDP1_NUMBER_JOBS 8
+
+#define dprintk(fdp1, fmt, arg...) \
+ v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/*
+ * FDP1 registers and bits
+ */
+
+/* FDP1 start register - Imm */
+#define FD1_CTL_CMD 0x0000
+#define FD1_CTL_CMD_STRCMD BIT(0)
+
+/* Sync generator register - Imm */
+#define FD1_CTL_SGCMD 0x0004
+#define FD1_CTL_SGCMD_SGEN BIT(0)
+
+/* Register set end register - Imm */
+#define FD1_CTL_REGEND 0x0008
+#define FD1_CTL_REGEND_REGEND BIT(0)
+
+/* Channel activation register - Vupdt */
+#define FD1_CTL_CHACT 0x000c
+#define FD1_CTL_CHACT_SMW BIT(9)
+#define FD1_CTL_CHACT_WR BIT(8)
+#define FD1_CTL_CHACT_SMR BIT(3)
+#define FD1_CTL_CHACT_RD2 BIT(2)
+#define FD1_CTL_CHACT_RD1 BIT(1)
+#define FD1_CTL_CHACT_RD0 BIT(0)
+
+/* Operation Mode Register - Vupdt */
+#define FD1_CTL_OPMODE 0x0010
+#define FD1_CTL_OPMODE_PRG BIT(4)
+#define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
+#define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
+#define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
+
+#define FD1_CTL_VPERIOD 0x0014
+#define FD1_CTL_CLKCTRL 0x0018
+#define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
+
+/* Software reset register */
+#define FD1_CTL_SRESET 0x001c
+#define FD1_CTL_SRESET_SRST BIT(0)
+
+/* Control status register (V-update-status) */
+#define FD1_CTL_STATUS 0x0024
+#define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
+#define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
+#define FD1_CTL_STATUS_SGREGSET BIT(10)
+#define FD1_CTL_STATUS_SGVERR BIT(9)
+#define FD1_CTL_STATUS_SGFREND BIT(8)
+#define FD1_CTL_STATUS_BSY BIT(0)
+
+#define FD1_CTL_VCYCLE_STAT 0x0028
+
+/* Interrupt enable register */
+#define FD1_CTL_IRQENB 0x0038
+/* Interrupt status register */
+#define FD1_CTL_IRQSTA 0x003c
+/* Interrupt control register */
+#define FD1_CTL_IRQFSET 0x0040
+
+/* Common IRQ Bit settings */
+#define FD1_CTL_IRQ_VERE BIT(16)
+#define FD1_CTL_IRQ_VINTE BIT(4)
+#define FD1_CTL_IRQ_FREE BIT(0)
+#define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
+ FD1_CTL_IRQ_VINTE | \
+ FD1_CTL_IRQ_FREE)
+
+/* RPF */
+#define FD1_RPF_SIZE 0x0060
+#define FD1_RPF_SIZE_MASK GENMASK(12, 0)
+#define FD1_RPF_SIZE_H_SHIFT 16
+#define FD1_RPF_SIZE_V_SHIFT 0
+
+#define FD1_RPF_FORMAT 0x0064
+#define FD1_RPF_FORMAT_CIPM BIT(16)
+#define FD1_RPF_FORMAT_RSPYCS BIT(13)
+#define FD1_RPF_FORMAT_RSPUVS BIT(12)
+#define FD1_RPF_FORMAT_CF BIT(8)
+
+#define FD1_RPF_PSTRIDE 0x0068
+#define FD1_RPF_PSTRIDE_Y_SHIFT 16
+#define FD1_RPF_PSTRIDE_C_SHIFT 0
+
+/* RPF0 Source Component Y Address register */
+#define FD1_RPF0_ADDR_Y 0x006c
+
+/* RPF1 Current Picture Registers */
+#define FD1_RPF1_ADDR_Y 0x0078
+#define FD1_RPF1_ADDR_C0 0x007c
+#define FD1_RPF1_ADDR_C1 0x0080
+
+/* RPF2 next picture register */
+#define FD1_RPF2_ADDR_Y 0x0084
+
+#define FD1_RPF_SMSK_ADDR 0x0090
+#define FD1_RPF_SWAP 0x0094
+
+/* WPF */
+#define FD1_WPF_FORMAT 0x00c0
+#define FD1_WPF_FORMAT_PDV_SHIFT 24
+#define FD1_WPF_FORMAT_FCNL BIT(20)
+#define FD1_WPF_FORMAT_WSPYCS BIT(15)
+#define FD1_WPF_FORMAT_WSPUVS BIT(14)
+#define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
+#define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
+#define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
+#define FD1_WPF_FORMAT_CSC BIT(8)
+
+#define FD1_WPF_RNDCTL 0x00c4
+#define FD1_WPF_RNDCTL_CBRM BIT(28)
+#define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
+
+#define FD1_WPF_PSTRIDE 0x00c8
+#define FD1_WPF_PSTRIDE_Y_SHIFT 16
+#define FD1_WPF_PSTRIDE_C_SHIFT 0
+
+/* WPF Destination picture */
+#define FD1_WPF_ADDR_Y 0x00cc
+#define FD1_WPF_ADDR_C0 0x00d0
+#define FD1_WPF_ADDR_C1 0x00d4
+#define FD1_WPF_SWAP 0x00d8
+#define FD1_WPF_SWAP_OSWAP_SHIFT 0
+#define FD1_WPF_SWAP_SSWAP_SHIFT 4
+
+/* WPF/RPF Common */
+#define FD1_RWPF_SWAP_BYTE BIT(0)
+#define FD1_RWPF_SWAP_WORD BIT(1)
+#define FD1_RWPF_SWAP_LWRD BIT(2)
+#define FD1_RWPF_SWAP_LLWD BIT(3)
+
+/* IPC */
+#define FD1_IPC_MODE 0x0100
+#define FD1_IPC_MODE_DLI BIT(8)
+#define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
+#define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
+#define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
+#define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
+#define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
+
+#define FD1_IPC_SMSK_THRESH 0x0104
+#define FD1_IPC_SMSK_THRESH_CONST 0x00010002
+
+#define FD1_IPC_COMB_DET 0x0108
+#define FD1_IPC_COMB_DET_CONST 0x00200040
+
+#define FD1_IPC_MOTDEC 0x010c
+#define FD1_IPC_MOTDEC_CONST 0x00008020
+
+/* DLI registers */
+#define FD1_IPC_DLI_BLEND 0x0120
+#define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
+
+#define FD1_IPC_DLI_HGAIN 0x0124
+#define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
+
+#define FD1_IPC_DLI_SPRS 0x0128
+#define FD1_IPC_DLI_SPRS_CONST 0x009004ff
+
+#define FD1_IPC_DLI_ANGLE 0x012c
+#define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
+
+#define FD1_IPC_DLI_ISOPIX0 0x0130
+#define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
+
+#define FD1_IPC_DLI_ISOPIX1 0x0134
+#define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
+
+/* Sensor registers */
+#define FD1_IPC_SENSOR_TH0 0x0140
+#define FD1_IPC_SENSOR_TH0_CONST 0x20208080
+
+#define FD1_IPC_SENSOR_TH1 0x0144
+#define FD1_IPC_SENSOR_TH1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL0 0x0170
+#define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
+
+#define FD1_IPC_SENSOR_CTL1 0x0174
+#define FD1_IPC_SENSOR_CTL1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL2 0x0178
+#define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
+#define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
+
+#define FD1_IPC_SENSOR_CTL3 0x017c
+#define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
+#define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
+
+/* Line memory pixel number register */
+#define FD1_IPC_LMEM 0x01e0
+#define FD1_IPC_LMEM_LINEAR 1024
+#define FD1_IPC_LMEM_TILE 960
+
+/* Internal Data (HW Version) */
+#define FD1_IP_INTDATA 0x0800
+#define FD1_IP_H3 0x02010101
+#define FD1_IP_M3W 0x02010202
+
+/* LUTs */
+#define FD1_LUT_DIF_ADJ 0x1000
+#define FD1_LUT_SAD_ADJ 0x1400
+#define FD1_LUT_BLD_GAIN 0x1800
+#define FD1_LUT_DIF_GAIN 0x1c00
+#define FD1_LUT_MDET 0x2000
+
+/**
+ * struct fdp1_fmt - The FDP1 internal format data
+ * @fourcc: the fourcc code, to match the V4L2 API
+ * @bpp: bits per pixel per plane
+ * @num_planes: number of planes
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @fmt: 7-bit format code for the fdp1 hardware
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @swap: swap register control
+ * @types: types of queue this format is applicable to
+ */
+struct fdp1_fmt {
+ u32 fourcc;
+ u8 bpp[3];
+ u8 num_planes;
+ u8 hsub;
+ u8 vsub;
+ u8 fmt;
+ bool swap_yc;
+ bool swap_uv;
+ u8 swap;
+ u8 types;
+};
+
+static const struct fdp1_fmt fdp1_formats[] = {
+ /* RGB formats are only supported by the Write Pixel Formatter */
+
+ { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+
+ /* YUV Formats are supported by Read and Write Pixel Formatters */
+
+ { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+};
+
+static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
+{
+ return fmt->fmt <= 0x1b; /* Last RGB code */
+}
+
+/*
+ * FDP1 Lookup tables range from 0...255 only
+ *
+ * Each table holds at most 256 entries; shorter tables
+ * are padded out to 256 entries by duplicating the last value.
+ */
+static const u8 fdp1_diff_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_sad_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
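+/*
+ * Single-entry gain tables: fdp1_write_lut() replicates the last (and
+ * only) value, 0x80, across all 256 hardware LUT entries.
+ */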
+static const u8 fdp1_bld_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_dif_gain[] = {
+ 0x80,
+};
+
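+/* The motion detection LUT is a full 256-entry identity ramp (0x00-0xff). */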
+static const u8 fdp1_mdet[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+/* Per-queue, driver-specific private data */
+struct fdp1_q_data {
+ const struct fdp1_fmt *fmt;
+ struct v4l2_pix_format_mplane format;
+
+ unsigned int vsize;
+ unsigned int stride_y;
+ unsigned int stride_c;
+};
+
+static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
+ fmt = &fdp1_formats[i];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+enum fdp1_deint_mode {
+ FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
+ FDP1_ADAPT2D3D,
+ FDP1_FIXED2D,
+ FDP1_FIXED3D,
+ FDP1_PREVFIELD,
+ FDP1_NEXTFIELD,
+};
+
+#define FDP1_DEINT_MODE_USES_NEXT(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_NEXTFIELD)
+
+#define FDP1_DEINT_MODE_USES_PREV(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_PREVFIELD)
+
+/*
+ * FDP1 operates on potentially 3 fields, which are tracked
+ * from the VB buffers using this context structure.
+ * A field buffer always holds a single field or a full frame, never two fields.
+ */
+struct fdp1_field_buffer {
+ struct vb2_v4l2_buffer *vb;
+ dma_addr_t addrs[3];
+
+ /* Only V4L2_FIELD_NONE, V4L2_FIELD_TOP or V4L2_FIELD_BOTTOM */
+ enum v4l2_field field;
+
+ /* Flag to indicate this is the last field in the vb */
+ bool last_field;
+
+ /* Buffer queue lists */
+ struct list_head list;
+};
+
+struct fdp1_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+ struct fdp1_field_buffer fields[2];
+ unsigned int num_fields;
+};
+
+static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
+}
+
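+/*
+ * A job describes a single hardware operation: up to three input fields
+ * (previous, active and next) producing one destination field.
+ */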
+struct fdp1_job {
+ struct fdp1_field_buffer *previous;
+ struct fdp1_field_buffer *active;
+ struct fdp1_field_buffer *next;
+ struct fdp1_field_buffer *dst;
+
+ /* A job can only be on one list at a time */
+ struct list_head list;
+};
+
+struct fdp1_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+ spinlock_t device_process_lock;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct device *dev;
+
+ /* Job Queues */
+ struct fdp1_job jobs[FDP1_NUMBER_JOBS];
+ struct list_head free_job_list;
+ struct list_head queued_job_list;
+ struct list_head hw_job_list;
+
+ unsigned int clk_rate;
+
+ struct rcar_fcp_device *fcp;
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct fdp1_ctx {
+ struct v4l2_fh fh;
+ struct fdp1_dev *fdp1;
+
+ struct v4l2_ctrl_handler hdl;
+ unsigned int sequence;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Deinterlace processing mode */
+ enum fdp1_deint_mode deint_mode;
+
+ /*
+ * Adaptive 2D/3D mode uses a shared mask, which is allocated at
+ * streamon if the ADAPT2D3D mode is requested.
+ */
+ unsigned int smsk_size;
+ dma_addr_t smsk_addr[2];
+ void *smsk_cpu;
+
+ /*
+ * The capture pipeline can specify an alpha value (0-255)
+ * for supported formats.
+ */
+ unsigned char alpha;
+
+ /* Source and destination queue data */
+ struct fdp1_q_data out_q; /* HW Source */
+ struct fdp1_q_data cap_q; /* HW Destination */
+
+ /*
+ * Field Queues
+ *
+ * Each interlaced field can be referenced by up to three jobs (as the
+ * previous, active or next field) and is tracked in this list.
+ *
+ * V4L2 buffers are tracked inside the fdp1_buffer and released when
+ * the last field completes.
+ */
+ struct list_head fields_queue;
+ unsigned int buffers_queued;
+
+ /*
+ * For de-interlacing we need to track our previous buffer
+ * while preparing our job lists.
+ */
+ struct fdp1_field_buffer *previous;
+};
+
+static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fdp1_ctx, fh);
+}
+
+static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+/*
+ * list_remove_job: Take the first item off the specified job list
+ *
+ * Returns: pointer to a job, or NULL if the list is empty.
+ */
+static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
+ struct list_head *list)
+{
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ job = list_first_entry_or_null(list, struct fdp1_job, list);
+ if (job)
+ list_del(&job->list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ return job;
+}
+
+/*
+ * list_add_job: Add a job to the specified job list
+ *
+ * Returns: void - always succeeds
+ */
+static void list_add_job(struct fdp1_dev *fdp1,
+ struct list_head *list,
+ struct fdp1_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ list_add_tail(&job->list, list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+}
+
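+/*
+ * Job lifecycle: a job is taken from free_job_list, prepared and placed
+ * on queued_job_list, moved to hw_job_list once it has been written to
+ * the hardware, and returned to free_job_list when the frame completes.
+ */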
+static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->free_job_list);
+}
+
+static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ /* Ensure that all residue from previous jobs is gone */
+ memset(job, 0, sizeof(struct fdp1_job));
+
+ list_add_job(fdp1, &fdp1->free_job_list, job);
+}
+
+static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->queued_job_list, job);
+}
+
+static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->queued_job_list);
+}
+
+static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->hw_job_list, job);
+}
+
+static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->hw_job_list);
+}
+
+/*
+ * Buffer lists handling
+ */
+static void fdp1_field_complete(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ /* job->previous may be NULL on the first field */
+ if (!fbuf)
+ return;
+
+ if (fbuf->last_field)
+ v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
+}
+
+static void fdp1_queue_field(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ list_add_tail(&fbuf->list, &ctx->fields_queue);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ ctx->buffers_queued++;
+}
+
+static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ ctx->buffers_queued--;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ if (fbuf)
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+/*
+ * Return the next field in the queue - or NULL,
+ * without removing the item from the list
+ */
+static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
+{
+ u32 value = ioread32(fdp1->regs + reg);
+
+ if (debug >= 2)
+ dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
+
+ return value;
+}
+
+static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
+{
+ if (debug >= 2)
+ dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
+
+ iowrite32(val, fdp1->regs + reg);
+}
+
+/* IPC registers are to be programmed with constant values */
+static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+
+ fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
+ fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
+ fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
+
+ fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
+ fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
+ fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
+ fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
+}
+
+static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ unsigned int x0, x1;
+ unsigned int hsize = src_q_data->format.width;
+ unsigned int vsize = src_q_data->format.height;
+
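+ /*
+ * Horizontal positions at 1/3 and 2/3 of the picture width,
+ * programmed into FD1_IPC_SENSOR_CTL3 below.
+ */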
+ x0 = hsize / 3;
+ x1 = 2 * hsize / 3;
+
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
+
+ fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
+ ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
+ FD1_IPC_SENSOR_CTL2);
+
+ fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
+ (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
+ FD1_IPC_SENSOR_CTL3);
+}
+
+/*
+ * fdp1_write_lut: Write a padded LUT to the hw
+ *
+ * FDP1 uses constant data for de-interlacing processing,
+ * with large tables. These hardware tables are all 256 bytes
+ * long; however, they often contain repeated data at the end.
+ *
+ * The last byte of the table is written to all remaining entries.
+ */
+static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
+ unsigned int len, unsigned int base)
+{
+ unsigned int i;
+ u8 pad;
+
+ /* Tables larger than the hw are clipped */
+ len = min(len, 256u);
+
+ for (i = 0; i < len; i++)
+ fdp1_write(fdp1, lut[i], base + (i*4));
+
+ /* Tables are padded with the last entry */
+ pad = lut[i-1];
+
+ for (; i < 256; i++)
+ fdp1_write(fdp1, pad, base + (i*4));
+}
+
+static void fdp1_set_lut(struct fdp1_dev *fdp1)
+{
+ fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
+ FD1_LUT_DIF_ADJ);
+ fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
+ FD1_LUT_SAD_ADJ);
+ fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
+ FD1_LUT_BLD_GAIN);
+ fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
+ FD1_LUT_DIF_GAIN);
+ fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
+ FD1_LUT_MDET);
+}
+
+static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 picture_size;
+ u32 pstride;
+ u32 format;
+ u32 smsk_addr;
+
+ struct fdp1_q_data *q_data = &ctx->out_q;
+
+ /* Picture size is common to Source and Destination frames */
+ picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
+ | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
+
+ /* Strides */
+ pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
+
+ /* Format control */
+ format = q_data->fmt->fmt;
+ if (q_data->fmt->swap_yc)
+ format |= FD1_RPF_FORMAT_RSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_RPF_FORMAT_RSPUVS;
+
+ if (job->active->field == V4L2_FIELD_BOTTOM) {
+ format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
+ smsk_addr = ctx->smsk_addr[0];
+ } else {
+ smsk_addr = ctx->smsk_addr[1];
+ }
+
+ /* Deint mode is non-zero when deinterlacing */
+ if (ctx->deint_mode)
+ format |= FD1_RPF_FORMAT_CIPM;
+
+ fdp1_write(fdp1, format, FD1_RPF_FORMAT);
+ fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
+ fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
+ fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
+ fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
+
+ /* Previous Field Channel (CH0) */
+ if (job->previous)
+ fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
+
+ /* Current Field Channel (CH1) */
+ fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
+ fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
+ fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
+
+ /* Next Field Channel (CH2) */
+ if (job->next)
+ fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
+}
+
+static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ struct fdp1_q_data *q_data = &ctx->cap_q;
+ u32 pstride;
+ u32 format;
+ u32 swap;
+ u32 rndctl;
+
+ pstride = q_data->format.plane_fmt[0].bytesperline
+ << FD1_WPF_PSTRIDE_Y_SHIFT;
+
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->format.plane_fmt[1].bytesperline
+ << FD1_WPF_PSTRIDE_C_SHIFT;
+
+ format = q_data->fmt->fmt; /* Output Format Code */
+
+ if (q_data->fmt->swap_yc)
+ format |= FD1_WPF_FORMAT_WSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_WPF_FORMAT_WSPUVS;
+
+ if (fdp1_fmt_is_rgb(q_data->fmt)) {
+ /* Enable Colour Space conversion */
+ format |= FD1_WPF_FORMAT_CSC;
+
+ /* Set WRTM */
+ if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
+ format |= FD1_WPF_FORMAT_WRTM_709_16;
+ else if (src_q_data->format.quantization ==
+ V4L2_QUANTIZATION_FULL_RANGE)
+ format |= FD1_WPF_FORMAT_WRTM_601_0;
+ else
+ format |= FD1_WPF_FORMAT_WRTM_601_16;
+ }
+
+ /* Set an alpha value into the Pad Value */
+ format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
+
+ /* Determine picture rounding and clipping */
+ rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
+ rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
+
+ /* WPF Swap needs both ISWAP and OSWAP setting */
+ swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
+ swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
+
+ fdp1_write(fdp1, format, FD1_WPF_FORMAT);
+ fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
+ fdp1_write(fdp1, swap, FD1_WPF_SWAP);
+ fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
+
+ fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
+ fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
+ fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
+}
+
+static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
+ u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
+ u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
+
+ /* De-interlacing Mode */
+ switch (ctx->deint_mode) {
+ default:
+ case FDP1_PROGRESSIVE:
+ dprintk(fdp1, "Progressive Mode\n");
+ opmode |= FD1_CTL_OPMODE_PRG;
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ break;
+ case FDP1_ADAPT2D3D:
+ dprintk(fdp1, "Adapt2D3D Mode\n");
+ if (ctx->sequence == 0 || ctx->aborting)
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ else
+ ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
+
+ if (ctx->sequence > 1) {
+ channels |= FD1_CTL_CHACT_SMW;
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ }
+
+ if (ctx->sequence > 2)
+ channels |= FD1_CTL_CHACT_SMR;
+
+ break;
+ case FDP1_FIXED3D:
+ dprintk(fdp1, "Fixed 3D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
+ /* Except for first and last frame, enable all channels */
+ if (!(ctx->sequence == 0 || ctx->aborting))
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ break;
+ case FDP1_FIXED2D:
+ dprintk(fdp1, "Fixed 2D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ /* No extra channels enabled */
+ break;
+ case FDP1_PREVFIELD:
+ dprintk(fdp1, "Previous Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
+ channels |= FD1_CTL_CHACT_RD0; /* Previous */
+ break;
+ case FDP1_NEXTFIELD:
+ dprintk(fdp1, "Next Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
+ channels |= FD1_CTL_CHACT_RD2; /* Next */
+ break;
+ }
+
+ fdp1_write(fdp1, channels, FD1_CTL_CHACT);
+ fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
+ fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
+}
+
+/*
+ * fdp1_device_process() - Run the hardware
+ *
+ * Configure and start the hardware to generate a single frame
+ * of output given our input parameters.
+ */
+static int fdp1_device_process(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->device_process_lock, flags);
+
+ /* Get a job to process */
+ job = get_queued_job(fdp1);
+ if (!job) {
+ /*
+ * VINT can call us to see if we can queue another job.
+ * If we have no work to do, we simply return.
+ */
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+ return 0;
+ }
+
+ /* First Frame only? ... */
+ fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
+
+ /* Set the mode, and configuration */
+ fdp1_configure_deint_mode(ctx, job);
+
+ /* DLI Static Configuration */
+ fdp1_set_ipc_dli(ctx);
+
+ /* Sensor Configuration */
+ fdp1_set_ipc_sensor(ctx);
+
+ /* Setup the source picture */
+ fdp1_configure_rpf(ctx, job);
+
+ /* Setup the destination picture */
+ fdp1_configure_wpf(ctx, job);
+
+ /* Line Memory Pixel Number Register for linear access */
+ fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
+
+ /* Enable Interrupts */
+ fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
+
+ /* Finally, the Immediate Registers */
+
+ /* This job is now in the HW queue */
+ queue_hw_job(fdp1, job);
+
+ /* Start the command */
+ fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
+
+ /* Registers will update to HW at next VINT */
+ fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
+
+ /* Enable VINT Generator */
+ fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
+
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * fdp1_m2m_job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int fdp1_m2m_job_ready(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ int srcbufs = 1;
+ int dstbufs = 1;
+
+ dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
+ v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+
+ /* One destination buffer is required for each source field */
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ dstbufs = 2;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
+ dprintk(ctx->fdp1, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void fdp1_m2m_job_abort(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+
+ dprintk(ctx->fdp1, "+\n");
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+
+ /* Immediate abort sequence */
+ fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
+ fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
+}
+
+/*
+ * fdp1_prepare_job: Prepare and queue a new job for a single action of work
+ *
+ * Prepare the next field (or frame in progressive mode) and an output
+ * buffer for the hardware to perform a single operation.
+ */
+static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct fdp1_buffer *fbuf;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned int buffers_required = 1;
+
+ dprintk(fdp1, "+\n");
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
+ buffers_required = 2;
+
+ if (ctx->buffers_queued < buffers_required)
+ return NULL;
+
+ job = fdp1_job_alloc(fdp1);
+ if (!job) {
+ dprintk(fdp1, "No free jobs currently available\n");
+ return NULL;
+ }
+
+ job->active = fdp1_dequeue_field(ctx);
+ if (!job->active) {
+ /* The buffer check should prevent this from ever happening */
+ dprintk(fdp1, "No input buffers currently available\n");
+
+ fdp1_job_free(fdp1, job);
+ return NULL;
+ }
+
+ dprintk(fdp1, "+ Buffer en-route...\n");
+
+ /*
+ * Source buffers have been prepared on our buffer_queue.
+ * Prepare our output buffer.
+ */
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ fbuf = to_fdp1_buffer(vbuf);
+ job->dst = &fbuf->fields[0];
+
+ job->active->vb->sequence = ctx->sequence;
+ job->dst->vb->sequence = ctx->sequence;
+ ctx->sequence++;
+
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
+ job->previous = ctx->previous;
+
+ /* Active buffer becomes the next job's previous buffer */
+ ctx->previous = job->active;
+ }
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
+ /* Must be called after 'active' is dequeued */
+ job->next = fdp1_peek_queued_field(ctx);
+ }
+
+ /* Transfer timestamps and flags from src->dst */
+
+ job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
+
+ job->dst->vb->flags = job->active->vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* Ideally, the frame-end function will just 'check' to see
+ * if there are more jobs instead
+ */
+ ctx->translen++;
+
+ /* Finally, Put this job on the processing queue */
+ queue_job(fdp1, job);
+
+ dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
+
+ return job;
+}
+
+/* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
+ *
+ * A single input buffer is taken and serialised into our fdp1_buffer
+ * queue. The queue is then processed to create as many jobs as possible
+ * from our available input.
+ */
+static void fdp1_m2m_device_run(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct vb2_v4l2_buffer *src_vb;
+ struct fdp1_buffer *buf;
+ unsigned int i;
+
+ dprintk(fdp1, "+\n");
+
+ ctx->translen = 0;
+
+ /* Get our incoming buffer of either one or two fields, or one frame */
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ buf = to_fdp1_buffer(src_vb);
+
+ for (i = 0; i < buf->num_fields; i++) {
+ struct fdp1_field_buffer *fbuf = &buf->fields[i];
+
+ fdp1_queue_field(ctx, fbuf);
+ dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
+ i, fbuf->last_field);
+ }
+
+ /* Queue as many jobs as our data provides for */
+ while (fdp1_prepare_job(ctx))
+ ;
+
+ if (ctx->translen == 0) {
+ dprintk(fdp1, "No jobs were processed. M2M action complete\n");
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ /* Kick the job processing action */
+ fdp1_device_process(ctx);
+}
+
+/*
+ * device_frame_end:
+ *
+ * Handles the M2M level after a buffer completion event.
+ */
+static void device_frame_end(struct fdp1_dev *fdp1,
+ enum vb2_buffer_state state)
+{
+ struct fdp1_ctx *ctx;
+ unsigned long flags;
+ struct fdp1_job *job = get_hw_queued_job(fdp1);
+
+ dprintk(fdp1, "+\n");
+
+ ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
+
+ if (ctx == NULL) {
+ v4l2_err(&fdp1->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ ctx->num_processed++;
+
+ /*
+ * fdp1_field_complete will call buf_done only when the last vb2_buffer
+ * reference is complete
+ */
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(job->dst->vb, state);
+ job->dst = NULL;
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ /* Move this job back to the free job list */
+ fdp1_job_free(fdp1, job);
+
+ dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
+ ctx->num_processed, ctx->translen);
+
+ if (ctx->num_processed == ctx->translen ||
+ ctx->aborting) {
+ dprintk(ctx->fdp1, "Finishing transaction\n");
+ ctx->num_processed = 0;
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ /*
+ * For pipelined performance support, this would
+ * be called from a VINT handler
+ */
+ fdp1_device_process(ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int fdp1_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DRIVER_NAME);
+ return 0;
+}
+
+static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num;
+
+ num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
+ if (fdp1_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= ARRAY_SIZE(fdp1_formats))
+ return -EINVAL;
+
+ /* Format found */
+ f->pixelformat = fdp1_formats[i].fourcc;
+
+ return 0;
+}
+
+static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_CAPTURE);
+}
+
+static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_OUTPUT);
+}
+
+static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_q_data *q_data;
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
+ const struct fdp1_fmt *fmt)
+{
+ unsigned int i;
+
+ /* Compute and clamp the stride and image size. */
+ for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? fmt->hsub : 1;
+ unsigned int vsub = i > 0 ? fmt->vsub : 1;
+ /* From VSP : TODO: Confirm alignment limits for FDP1 */
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * fmt->bpp[i] / 8,
+ round_down(FDP1_MAX_STRIDE, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+
+ if (fmt->num_planes == 3) {
+ /* The two chroma planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+
+ memset(pix->plane_fmt[2].reserved, 0,
+ sizeof(pix->plane_fmt[2].reserved));
+ }
+}
+
+static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+
+ /* Validate the pixel format to ensure the output queue supports it. */
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || !(fmt->types & FDP1_OUTPUT))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+
+ /*
+ * Progressive video and all interlaced field orders are acceptable.
+ * Default to V4L2_FIELD_INTERLACED.
+ */
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ !V4L2_FIELD_HAS_BOTH(pix->field))
+ pix->field = V4L2_FIELD_INTERLACED;
+
+ /*
+ * The deinterlacer doesn't care about the colorspace; accept all values
+ * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
+ * at the output of the deinterlacer supports a subset of encodings and
+ * quantization methods and will only be available when the colorspace
+ * allows it.
+ */
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ /*
+ * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
+ * them to the supported frame size range. The height boundaries are
+ * related to the full frame; divide them by two when the format passes
+ * fields in separate buffers.
+ */
+ width = round_down(pix->width, fmt->hsub);
+ pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
+
+ height = round_down(pix->height, fmt->vsub);
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
+ else
+ pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ struct fdp1_q_data *src_data = &ctx->out_q;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ const struct fdp1_fmt *fmt;
+ bool allow_rgb;
+
+ /*
+ * Validate the pixel format. We can only accept RGB output formats if
+ * the input encoding and quantization are compatible with the format
+ * conversions supported by the hardware. The supported combinations are
+ *
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
+ * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
+ */
+ colorspace = src_data->format.colorspace;
+
+ ycbcr_enc = src_data->format.ycbcr_enc;
+ if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ quantization = src_data->format.quantization;
+ if (quantization == V4L2_QUANTIZATION_DEFAULT)
+ quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
+ ycbcr_enc);
+
+ allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
+ (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE);
+
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+ pix->field = V4L2_FIELD_NONE;
+
+ /*
+ * The colorspace on the capture queue is copied from the output queue
+ * as the hardware can't change the colorspace. It can convert YCbCr to
+ * RGB though, in which case the encoding and quantization are set to
+ * default values as anything else wouldn't make sense.
+ */
+ pix->colorspace = src_data->format.colorspace;
+ pix->xfer_func = src_data->format.xfer_func;
+
+ if (fdp1_fmt_is_rgb(fmt)) {
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ } else {
+ pix->ycbcr_enc = src_data->format.ycbcr_enc;
+ pix->quantization = src_data->format.quantization;
+ }
+
+ /*
+ * The frame width is identical to the output queue, and the height is
+ * either doubled or identical depending on whether the output queue
+ * field order contains one or two fields per frame.
+ */
+ pix->width = src_data->format.width;
+ if (src_data->format.field == V4L2_FIELD_ALTERNATE)
+ pix->height = 2 * src_data->format.height;
+ else
+ pix->height = src_data->format.height;
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
+ else
+ fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
+
+ dprintk(ctx->fdp1, "Try %s format: %4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static void fdp1_set_format(struct fdp1_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct fdp1_q_data *q_data = get_q_data(ctx, type);
+ const struct fdp1_fmt *fmtinfo;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, &fmtinfo, pix);
+ else
+ fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
+
+ q_data->fmt = fmtinfo;
+ q_data->format = *pix;
+
+ q_data->vsize = pix->height;
+ if (pix->field != V4L2_FIELD_NONE)
+ q_data->vsize /= 2;
+
+ q_data->stride_y = pix->plane_fmt[0].bytesperline;
+ q_data->stride_c = pix->plane_fmt[1].bytesperline;
+
+ /* Adjust strides for interleaved buffers */
+ if (pix->field == V4L2_FIELD_INTERLACED ||
+ pix->field == V4L2_FIELD_INTERLACED_TB ||
+ pix->field == V4L2_FIELD_INTERLACED_BT) {
+ q_data->stride_y *= 2;
+ q_data->stride_c *= 2;
+ }
+
+ /* Propagate the format from the output node to the capture node. */
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct fdp1_q_data *dst_data = &ctx->cap_q;
+
+ /*
+ * Copy the format, clear the per-plane bytes per line and image
+ * size, override the field and double the height if needed.
+ */
+ dst_data->format = q_data->format;
+ memset(dst_data->format.plane_fmt, 0,
+ sizeof(dst_data->format.plane_fmt));
+
+ dst_data->format.field = V4L2_FIELD_NONE;
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ dst_data->format.height *= 2;
+
+ fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
+
+ dst_data->vsize = dst_data->format.height;
+ dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
+ dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
+ }
+}
+
+static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
+
+ dprintk(ctx->fdp1, "Set %s format: %4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ ctrl->val = 2;
+ else
+ ctrl->val = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_DEINTERLACING_MODE:
+ ctx->deint_mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
+ .s_ctrl = fdp1_s_ctrl,
+ .g_volatile_ctrl = fdp1_g_ctrl,
+};
+
+static const char * const fdp1_ctrl_deint_menu[] = {
+ "Progressive",
+ "Adaptive 2D/3D",
+ "Fixed 2D",
+ "Fixed 3D",
+ "Previous field",
+ "Next field",
+ NULL
+};
+
+static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
+ .vidioc_querycap = fdp1_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int fdp1_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fdp1_q_data *q_data;
+ unsigned int i;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ if (*nplanes) {
+ if (*nplanes > FDP1_MAX_PLANES)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
+ struct vb2_v4l2_buffer *vbuf,
+ unsigned int field_num)
+{
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
+ unsigned int num_fields;
+ unsigned int i;
+
+ num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+
+ fbuf->vb = vbuf;
+ fbuf->last_field = (field_num + 1) == num_fields;
+
+ for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
+ fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
+
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ /*
+ * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
+ * top-bottom for 50Hz. As TV standards are not applicable to
+ * the mem-to-mem API, use the height as a heuristic.
+ */
+ fbuf->field = (q_data->format.height < 576) == field_num
+ ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_BT:
+ fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ fbuf->field = vbuf->field;
+ break;
+ }
+
+ /* Nothing more needs to be done for the first field */
+ if (!field_num)
+ return;
+
+ /* Adjust buffer addresses for second field */
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] +=
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] += q_data->vsize *
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ }
+}
+
+static int fdp1_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ bool field_valid = true;
+
+ /* Validate the buffer field. */
+ switch (q_data->format.field) {
+ case V4L2_FIELD_NONE:
+ if (vbuf->field != V4L2_FIELD_NONE)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_ALTERNATE:
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ if (vbuf->field != q_data->format.field)
+ field_valid = false;
+ break;
+ }
+
+ if (!field_valid) {
+ dprintk(ctx->fdp1,
+ "buffer field %u invalid for format field %u\n",
+ vbuf->field, q_data->format.field);
+ return -EINVAL;
+ }
+ } else {
+ vbuf->field = V4L2_FIELD_NONE;
+ }
+
+ /* Validate the planes sizes. */
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dprintk(ctx->fdp1,
+ "data will not fit into plane [%u/%u] (%lu < %lu)\n",
+ i, q_data->format.num_planes,
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* We have known size formats all around */
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+ for (i = 0; i < buf->num_fields; ++i)
+ fdp1_buf_prepare_field(q_data, vbuf, i);
+
+ return 0;
+}
+
+static void fdp1_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * Force progressive de-interlace mode when the format is
+ * progressive, ignoring any mode set by the user on the device;
+ * otherwise keep the requested de-interlace mode.
+ */
+ if (q_data->format.field == V4L2_FIELD_NONE)
+ ctx->deint_mode = FDP1_PROGRESSIVE;
+
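+ /*
+ * The adaptive 2D/3D mode needs an SMSK work buffer of two bytes
+ * per pixel; the allocation is split in half to give the hardware
+ * two mask areas to alternate between.
+ */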
+ if (ctx->deint_mode == FDP1_ADAPT2D3D) {
+ u32 stride;
+ dma_addr_t smsk_base;
+ const u32 bpp = 2; /* bytes per pixel */
+
+ stride = round_up(q_data->format.width, 8);
+
+ ctx->smsk_size = bpp * stride * q_data->vsize;
+
+ ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
+ ctx->smsk_size, &smsk_base, GFP_KERNEL);
+
+ if (ctx->smsk_cpu == NULL) {
+ dprintk(ctx->fdp1, "Failed to alloc smsk\n");
+ return -ENOMEM;
+ }
+
+ ctx->smsk_addr[0] = smsk_base;
+ ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
+ }
+ }
+
+ return 0;
+}
+
+static void fdp1_stop_streaming(struct vb2_queue *q)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
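+ /* Give back all buffers still held by the driver with an error state */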
+ while (1) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ break;
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+ }
+
+ /* Empty Output queues */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /* Empty our internal queues */
+ struct fdp1_field_buffer *fbuf;
+
+ /* Free any queued buffers */
+ fbuf = fdp1_dequeue_field(ctx);
+ while (fbuf != NULL) {
+ fdp1_field_complete(ctx, fbuf);
+ fbuf = fdp1_dequeue_field(ctx);
+ }
+
+ /* Free smsk_data */
+ if (ctx->smsk_cpu) {
+ dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
+ ctx->smsk_cpu, ctx->smsk_addr[0]);
+ ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
+ ctx->smsk_cpu = NULL;
+ }
+
+ WARN(!list_empty(&ctx->fields_queue),
+ "Buffer queue not empty");
+ } else {
+ /* Empty Capture queues (Jobs) */
+ struct fdp1_job *job;
+
+ job = get_queued_job(ctx->fdp1);
+ while (job) {
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
+ job->dst = NULL;
+
+ job = get_queued_job(ctx->fdp1);
+ }
+
+ /* Free any held buffer in the ctx */
+ fdp1_field_complete(ctx, ctx->previous);
+
+ WARN(!list_empty(&ctx->fdp1->queued_job_list),
+ "Queued Job List not empty");
+
+ WARN(!list_empty(&ctx->fdp1->hw_job_list),
+ "HW Job list not empty");
+ }
+}
+
+static struct vb2_ops fdp1_qops = {
+ .queue_setup = fdp1_queue_setup,
+ .buf_prepare = fdp1_buf_prepare,
+ .buf_queue = fdp1_buf_queue,
+ .start_streaming = fdp1_start_streaming,
+ .stop_streaming = fdp1_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fdp1_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ src_vq->ops = &fdp1_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fdp1->dev_mutex;
+ src_vq->dev = ctx->fdp1->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ dst_vq->ops = &fdp1_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fdp1->dev_mutex;
+ dst_vq->dev = ctx->fdp1->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int fdp1_open(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct v4l2_pix_format_mplane format;
+ struct fdp1_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fdp1->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->fdp1 = fdp1;
+
+ /* Initialise Queues */
+ INIT_LIST_HEAD(&ctx->fields_queue);
+
+ ctx->translen = 1;
+ ctx->sequence = 0;
+
+ /* Initialise controls */
+
+ v4l2_ctrl_handler_init(&ctx->hdl, 3);
+ v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
+ fdp1_ctrl_deint_menu);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (ctx->hdl.error) {
+ ret = ctx->hdl.error;
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ goto done;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->hdl;
+ v4l2_ctrl_handler_setup(&ctx->hdl);
+
+ /* Configure default parameters. */
+ memset(&format, 0, sizeof(format));
+ fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
+ goto done;
+ }
+
+ /* Perform any power management required */
+ pm_runtime_get_sync(fdp1->dev);
+
+ v4l2_fh_add(&ctx->fh);
+
+ dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+done:
+ mutex_unlock(&fdp1->dev_mutex);
+ return ret;
+}
+
+static int fdp1_release(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dprintk(fdp1, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&fdp1->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ kfree(ctx);
+
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations fdp1_fops = {
+ .owner = THIS_MODULE,
+ .open = fdp1_open,
+ .release = fdp1_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device fdp1_videodev = {
+ .name = DRIVER_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &fdp1_fops,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .ioctl_ops = &fdp1_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fdp1_m2m_device_run,
+ .job_ready = fdp1_m2m_job_ready,
+ .job_abort = fdp1_m2m_job_abort,
+};
+
+static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
+{
+ struct fdp1_dev *fdp1 = dev_id;
+ u32 int_status;
+ u32 ctl_status;
+ u32 vint_cnt;
+ u32 cycles;
+
+ int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
+ cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
+ ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
+ vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
+ FD1_CTL_STATUS_VINT_CNT_SHIFT;
+
+ /* Clear interrupts */
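+ /* Pending bits are cleared by writing them back as 0 */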
+ fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
+
+ if (debug >= 2) {
+ dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
+ int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
+ int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
+ int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
+
+ dprintk(fdp1, "CycleStatus = %d (%dms)\n",
+ cycles, cycles/(fdp1->clk_rate/1000));
+
+ dprintk(fdp1,
+ "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
+ ctl_status, vint_cnt,
+ ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
+ ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
+ ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
+ ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
+ dprintk(fdp1, "***********************************\n");
+ }
+
+ /* Spurious interrupt */
+ if (!(FD1_CTL_IRQ_MASK & int_status))
+ return IRQ_NONE;
+
+ /* Work completed, release the frame */
+ if (FD1_CTL_IRQ_VERE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
+ else if (FD1_CTL_IRQ_FREE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static int fdp1_probe(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1;
+ struct video_device *vfd;
+ struct device_node *fcp_node;
+ struct resource *res;
+ struct clk *clk;
+ unsigned int i;
+
+ int ret;
+ int hw_version;
+
+ fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
+ if (!fdp1)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fdp1->free_job_list);
+ INIT_LIST_HEAD(&fdp1->queued_job_list);
+ INIT_LIST_HEAD(&fdp1->hw_job_list);
+
+ /* Initialise the jobs on the free list */
+ for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
+ list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
+
+ mutex_init(&fdp1->dev_mutex);
+
+ spin_lock_init(&fdp1->irqlock);
+ spin_lock_init(&fdp1->device_process_lock);
+ fdp1->dev = &pdev->dev;
+ platform_set_drvdata(pdev, fdp1);
+
+ /* Memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fdp1->regs))
+ return PTR_ERR(fdp1->regs);
+
+ /* Interrupt service routine registration */
+ fdp1->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
+ dev_name(&pdev->dev), fdp1);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
+ return ret;
+ }
+
+ /* FCP */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+ dev_err(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+ }
+
+ /* Determine our clock rate */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ fdp1->clk_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ /* V4L2 device registration */
+ ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ /* M2M registration */
+ fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fdp1->m2m_dev)) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(fdp1->m2m_dev);
+ goto unreg_dev;
+ }
+
+ /* Video registration */
+ fdp1->vfd = fdp1_videodev;
+ vfd = &fdp1->vfd;
+ vfd->lock = &fdp1->dev_mutex;
+ vfd->v4l2_dev = &fdp1->v4l2_dev;
+ video_set_drvdata(vfd, fdp1);
+ strlcpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ goto release_m2m;
+ }
+
+ v4l2_info(&fdp1->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ /* Power up the cells to read HW */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(fdp1->dev);
+
+ hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
+ switch (hw_version) {
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
+ case FD1_IP_M3W:
+ dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
+ break;
+ default:
+ dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+ hw_version);
+ }
+
+ /* Allow the hw to sleep until an open call puts it to use */
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+
+release_m2m:
+ v4l2_m2m_release(fdp1->m2m_dev);
+
+unreg_dev:
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+
+ return ret;
+}
+
+static int fdp1_remove(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(fdp1->m2m_dev);
+ video_unregister_device(&fdp1->vfd);
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(fdp1->fcp);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ /* Program in the static LUTs */
+ fdp1_set_lut(fdp1);
+
+ return rcar_fcp_enable(fdp1->fcp);
+}
+
+static const struct dev_pm_ops fdp1_pm_ops = {
+ SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
+ fdp1_pm_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id fdp1_dt_ids[] = {
+ { .compatible = "renesas,fdp1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+
+static struct platform_driver fdp1_pdrv = {
+ .probe = fdp1_probe,
+ .remove = fdp1_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fdp1_dt_ids,
+ .pm = &fdp1_pm_ops,
+ },
+};
+
+module_platform_driver(fdp1_pdrv);
+
+MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
+MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
index 0912d0a892e2..a1d823ab0c63 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -178,20 +178,12 @@ void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
{
- unsigned int int_status;
-
- int_status = readl(base + EXYNOS4_INT_STATUS_REG);
-
- return int_status;
+ return readl(base + EXYNOS4_INT_STATUS_REG);
}
unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
{
- unsigned int fifo_status;
-
- fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
-
- return fifo_status;
+ return readl(base + EXYNOS4_FIFO_STATUS_REG);
}
void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
@@ -296,10 +288,7 @@ void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
{
- unsigned int size;
-
- size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
- return size;
+ return readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
}
void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 83e01f3466e9..d2cd35916dc5 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -386,7 +386,8 @@
((w) * 144 + 8192 * (h) + 49216 + 1048576)
#define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \
(2096 * ((w) + (h) + 1))
-#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) ((w) * 400)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h) \
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h)
#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \
((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112)
#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
index cc7cbec51b5e..4d1c3750eb5e 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
@@ -90,7 +90,7 @@
#define S5P_FIMV_E_H264_OPTIONS_V8 0xfb54
/* MFCv8 Context buffer sizes */
-#define MFC_CTX_BUF_SIZE_V8 (30 * SZ_1K) /* 30KB */
+#define MFC_CTX_BUF_SIZE_V8 (36 * SZ_1K) /* 36KB */
#define MFC_H264_DEC_CTX_BUF_SIZE_V8 (2 * SZ_1M) /* 2MB */
#define MFC_OTHER_DEC_CTX_BUF_SIZE_V8 (20 * SZ_1K) /* 20KB */
#define MFC_H264_ENC_CTX_BUF_SIZE_V8 (100 * SZ_1K) /* 100KB */
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 6ccc3f8c122a..57b7e0be0596 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -393,6 +393,9 @@
#define S5P_FIMV_REG_CLEAR_COUNT 0
/* Error handling defines */
+#define S5P_FIMV_ERR_NO_VALID_SEQ_HDR 67
+#define S5P_FIMV_ERR_INCOMPLETE_FRAME 124
+#define S5P_FIMV_ERR_TIMEOUT 140
#define S5P_FIMV_ERR_WARNINGS_START 145
#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
#define S5P_FIMV_ERR_DEC_SHIFT 0
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 0a5b8f5e011e..bb0a5887c9a9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -641,8 +641,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
case S5P_MFC_R2H_CMD_ERR_RET:
/* An error has occurred */
if (ctx->state == MFCINST_RUNNING &&
- s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
- dev->warn_start)
+ (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
+ dev->warn_start ||
+ err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
+ err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
+ err == S5P_FIMV_ERR_TIMEOUT))
s5p_mfc_handle_frame(ctx, reason, err);
else
s5p_mfc_handle_error(dev, ctx, reason, err);
@@ -848,6 +851,11 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOENT;
goto err_queue_init;
}
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
@@ -878,6 +886,12 @@ static int s5p_mfc_open(struct file *file)
* will keep the value of bytesused intact.
*/
q->allow_zero_bytesused = 1;
+
+ /*
+ * We'll do mostly sequential access, so sacrifice TLB efficiency for
+ * faster allocation.
+ */
+ q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
@@ -926,10 +940,11 @@ static int s5p_mfc_release(struct file *file)
mfc_debug_enter();
if (dev)
mutex_lock(&dev->mfc_mutex);
- s5p_mfc_clock_on();
vb2_queue_release(&ctx->vq_src);
vb2_queue_release(&ctx->vq_dst);
if (dev) {
+ s5p_mfc_clock_on();
+
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
/*
@@ -948,12 +963,14 @@ static int s5p_mfc_release(struct file *file)
mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
del_timer_sync(&dev->watchdog_timer);
+ s5p_mfc_clock_off();
if (s5p_mfc_power_off() < 0)
mfc_err("Power off failed\n");
+ } else {
+ mfc_debug(2, "Shutting down clock\n");
+ s5p_mfc_clock_off();
}
}
- mfc_debug(2, "Shutting down clock\n");
- s5p_mfc_clock_off();
if (dev)
dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
@@ -1082,6 +1099,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
idx);
if (ret == 0)
return child;
+ device_del(child);
}
put_device(child);
@@ -1387,31 +1405,9 @@ static int s5p_mfc_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
-static int s5p_mfc_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
- atomic_set(&m_dev->pm.power, 0);
- return 0;
-}
-
-static int s5p_mfc_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
- atomic_set(&m_dev->pm.power, 1);
- return 0;
-}
-#endif
-
/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
- SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
- NULL)
};
static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
@@ -1438,6 +1434,9 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
.buf_size = &buf_size_v5,
.buf_align = &mfc_buf_align_v5,
.fw_name[0] = "s5p-mfc.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
+ .use_clock_gating = true,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
@@ -1470,6 +1469,8 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
* for init buffer command
*/
.fw_name[1] = "s5p-mfc-v6-v2.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
@@ -1497,6 +1498,8 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
.buf_size = &buf_size_v7,
.buf_align = &mfc_buf_align_v7,
.fw_name[0] = "s5p-mfc-v7.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
@@ -1524,6 +1527,19 @@ static struct s5p_mfc_variant mfc_drvdata_v8 = {
.buf_size = &buf_size_v8,
.buf_align = &mfc_buf_align_v8,
.fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
+ .version = MFC_VERSION_V8,
+ .version_bit = MFC_V8_BIT,
+ .port_num = MFC_NUM_PORTS_V8,
+ .buf_size = &buf_size_v8,
+ .buf_align = &mfc_buf_align_v8,
+ .fw_name[0] = "s5p-mfc-v8.fw",
+ .clk_names = {"pclk", "aclk", "aclk_xiu"},
+ .num_clocks = 3,
};
static const struct of_device_id exynos_mfc_match[] = {
@@ -1539,6 +1555,9 @@ static const struct of_device_id exynos_mfc_match[] = {
}, {
.compatible = "samsung,mfc-v8",
.data = &mfc_drvdata_v8,
+ }, {
+ .compatible = "samsung,exynos5433-mfc",
+ .data = &mfc_drvdata_v8_5433,
},
{},
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 46b99f28cbd7..ab23236aa942 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -104,6 +104,8 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
#define S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET 16
#define S5P_MFC_R2H_CMD_ERR_RET 32
+#define MFC_MAX_CLOCKS 4
+
#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
(offset))
@@ -197,9 +199,12 @@ struct s5p_mfc_buf {
* struct s5p_mfc_pm - power management data structure
*/
struct s5p_mfc_pm {
- struct clk *clock;
struct clk *clock_gate;
- atomic_t power;
+ const char **clk_names;
+ struct clk *clocks[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
+
struct device *device;
};
@@ -235,6 +240,9 @@ struct s5p_mfc_variant {
struct s5p_mfc_buf_size *buf_size;
struct s5p_mfc_buf_align *buf_align;
char *fw_name[MFC_FW_MAX_VERSIONS];
+ const char *clk_names[MFC_MAX_CLOCKS];
+ int num_clocks;
+ bool use_clock_gating;
};
/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index 5936923c631c..1936a5b868f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -39,6 +39,12 @@ extern int mfc_debug_level;
__func__, __LINE__, ##args); \
} while (0)
+#define mfc_err_limited(fmt, args...) \
+ do { \
+ printk_ratelimited(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
#define mfc_info(fmt, args...) \
do { \
printk(KERN_INFO "%s:%d: " fmt, \
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 52081ddc9bf2..367ef8e8dbf0 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -642,7 +642,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
int ret;
if (ctx->state == MFCINST_ERROR) {
- mfc_err("Call on DQBUF after unrecoverable error\n");
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
return -EIO;
}
@@ -793,18 +793,17 @@ static int vidioc_g_crop(struct file *file, void *priv,
cr->c.top = top;
cr->c.width = ctx->img_width - left - right;
cr->c.height = ctx->img_height - top - bottom;
- mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
- "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
- cr->c.width, cr->c.height, right, bottom,
- ctx->buf_width, ctx->buf_height);
+ mfc_debug(2, "Cropping info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
+ left, top, cr->c.width, cr->c.height, right, bottom,
+ ctx->buf_width, ctx->buf_height);
} else {
cr->c.left = 0;
cr->c.top = 0;
cr->c.width = ctx->img_width;
cr->c.height = ctx->img_height;
- mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
- "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width,
- ctx->buf_height);
+ mfc_debug(2, "Cropping info: w=%d h=%d fw=%d fh=%d\n",
+ cr->c.width, cr->c.height, ctx->buf_width,
+ ctx->buf_height);
}
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index fcc2e054c61f..e39d9e06e299 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1268,7 +1268,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
int ret;
if (ctx->state == MFCINST_ERROR) {
- mfc_err("Call on DQBUF after unrecoverable error\n");
+ mfc_err_limited("Call on DQBUF after unrecoverable error\n");
return -EIO;
}
if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 1e7250260a9a..99f65a92a6be 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -45,13 +45,13 @@ int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
if (!b->virt) {
- mfc_err("Allocating private buffer failed\n");
+ mfc_err("Allocating private buffer of size %zu failed\n",
+ b->size);
return -ENOMEM;
}
if (b->dma < base) {
- mfc_err("Invaling memory configuration!\n");
- mfc_err("Allocated buffer (%pad) is lower than memory base address (%pad)\n",
+ mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
&b->dma, &base);
dma_free_coherent(dev, b->size, b->virt, b->dma);
return -ENOMEM;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 81e1e4ce6c24..f4301d5bbd32 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1293,14 +1293,11 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
* First set the output frame buffers
*/
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
- mfc_err("It seems that not all destionation buffers were "
- "mmaped\nMFC requires that all destination are mmaped "
- "before starting processing\n");
+ mfc_err("It seems that not all destionation buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n");
return -EAGAIN;
}
if (list_empty(&ctx->src_queue)) {
- mfc_err("Header has been deallocated in the middle of"
- " initialization\n");
+ mfc_err("Header has been deallocated in the middle of initialization\n");
return -EIO;
}
temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 930dc2dddae6..eb85cedc5ef3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -18,129 +18,101 @@
#include "s5p_mfc_debug.h"
#include "s5p_mfc_pm.h"
-#define MFC_GATE_CLK_NAME "mfc"
-#define MFC_SCLK_NAME "sclk_mfc"
-#define MFC_SCLK_RATE (200 * 1000000)
-
-#define CLK_DEBUG
-
static struct s5p_mfc_pm *pm;
static struct s5p_mfc_dev *p_dev;
-
-#ifdef CLK_DEBUG
static atomic_t clk_ref;
-#endif
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
{
- int ret = 0;
+ int i;
pm = &dev->pm;
p_dev = dev;
- pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
- if (IS_ERR(pm->clock_gate)) {
- mfc_err("Failed to get clock-gating control\n");
- ret = PTR_ERR(pm->clock_gate);
- goto err_g_ip_clk;
- }
- ret = clk_prepare(pm->clock_gate);
- if (ret) {
- mfc_err("Failed to prepare clock-gating control\n");
- goto err_p_ip_clk;
- }
+ pm->num_clocks = dev->variant->num_clocks;
+ pm->clk_names = dev->variant->clk_names;
+ pm->device = &dev->plat_dev->dev;
+ pm->clock_gate = NULL;
- if (dev->variant->version != MFC_VERSION_V6) {
- pm->clock = clk_get(&dev->plat_dev->dev, MFC_SCLK_NAME);
- if (IS_ERR(pm->clock)) {
- mfc_info("Failed to get MFC special clock control\n");
- pm->clock = NULL;
- } else {
- clk_set_rate(pm->clock, MFC_SCLK_RATE);
- ret = clk_prepare_enable(pm->clock);
- if (ret) {
- mfc_err("Failed to enable MFC special clock\n");
- goto err_s_clk;
- }
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
+ if (IS_ERR(pm->clocks[i])) {
+ mfc_err("Failed to get clock: %s\n",
+ pm->clk_names[i]);
+ return PTR_ERR(pm->clocks[i]);
}
}
- atomic_set(&pm->power, 0);
-#ifdef CONFIG_PM
- pm->device = &dev->plat_dev->dev;
+ if (dev->variant->use_clock_gating)
+ pm->clock_gate = pm->clocks[0];
+
pm_runtime_enable(pm->device);
-#endif
-#ifdef CLK_DEBUG
atomic_set(&clk_ref, 0);
-#endif
return 0;
-
-err_s_clk:
- clk_put(pm->clock);
- pm->clock = NULL;
-err_p_ip_clk:
- clk_put(pm->clock_gate);
- pm->clock_gate = NULL;
-err_g_ip_clk:
- return ret;
}
void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
{
- if (dev->variant->version != MFC_VERSION_V6 &&
- pm->clock) {
- clk_disable_unprepare(pm->clock);
- clk_put(pm->clock);
- pm->clock = NULL;
- }
- clk_unprepare(pm->clock_gate);
- clk_put(pm->clock_gate);
- pm->clock_gate = NULL;
-#ifdef CONFIG_PM
pm_runtime_disable(pm->device);
-#endif
}
int s5p_mfc_clock_on(void)
{
- int ret = 0;
-#ifdef CLK_DEBUG
atomic_inc(&clk_ref);
mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
-#endif
- if (!IS_ERR_OR_NULL(pm->clock_gate))
- ret = clk_enable(pm->clock_gate);
- return ret;
+
+ return clk_enable(pm->clock_gate);
}
void s5p_mfc_clock_off(void)
{
-#ifdef CLK_DEBUG
atomic_dec(&clk_ref);
mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
-#endif
- if (!IS_ERR_OR_NULL(pm->clock_gate))
- clk_disable(pm->clock_gate);
+
+ clk_disable(pm->clock_gate);
}
int s5p_mfc_power_on(void)
{
-#ifdef CONFIG_PM
- return pm_runtime_get_sync(pm->device);
-#else
- atomic_set(&pm->power, 1);
+ int i, ret = 0;
+
+ ret = pm_runtime_get_sync(pm->device);
+ if (ret < 0)
+ return ret;
+
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+ ret = clk_prepare_enable(pm->clocks[i]);
+ if (ret < 0) {
+ mfc_err("clock prepare failed for clock: %s\n",
+ pm->clk_names[i]);
+ i++;
+ goto err;
+ }
+ }
+
+ /* prepare for software clock gating */
+ clk_disable(pm->clock_gate);
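+ /* The gate clock stays prepared; clock_on()/clock_off() only toggle enable */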
+
return 0;
-#endif
+err:
+ while (--i > 0)
+ clk_disable_unprepare(pm->clocks[i]);
+ pm_runtime_put(pm->device);
+ return ret;
}
int s5p_mfc_power_off(void)
{
-#ifdef CONFIG_PM
+ int i;
+
+ /* finish software clock gating */
+ clk_enable(pm->clock_gate);
+
+ for (i = 0; i < pm->num_clocks; i++)
+ clk_disable_unprepare(pm->clocks[i]);
+
return pm_runtime_put_sync(pm->device);
-#else
- atomic_set(&pm->power, 0);
- return 0;
-#endif
}
-
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 45f82b5ddd77..823608112d89 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1337,6 +1337,7 @@ static int bdisp_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "failed to get IRQ resource\n");
+ ret = -EINVAL;
goto err_clk;
}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 30c148b9d65e..7652ce2ec1dc 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -112,8 +112,7 @@ static void channel_swdemux_tsklet(unsigned long data)
buf = (u8 *) channel->back_buffer_aligned;
dev_dbg(fei->dev,
- "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
- "rp=0x%lx, wp=0x%lx\n",
+ "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
for (n = 0; n < num_packets; n++) {
@@ -789,8 +788,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
/* sanity check value */
if (tsin->tsin_id > fei->hw_stats.num_ib) {
dev_err(&pdev->dev,
- "tsin-num %d specified greater than number\n\t"
- "of input block hw in SoC! (%d)",
+ "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
tsin->tsin_id, fei->hw_stats.num_ib);
ret = -EINVAL;
goto err_clk_disable;
@@ -815,6 +813,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
if (!i2c_bus) {
dev_err(&pdev->dev, "No i2c-bus found\n");
+ ret = -ENODEV;
goto err_clk_disable;
}
tsin->i2c_adapter =
@@ -822,6 +821,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
of_node_put(i2c_bus);
+ ret = -ENODEV;
goto err_clk_disable;
}
of_node_put(i2c_bus);
@@ -855,8 +855,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
tsin->demux_mapping = index;
dev_dbg(fei->dev,
- "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
- "serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
+ "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
fei->channel_data[index], index,
tsin->tsin_id, tsin->invert_ts_clk,
tsin->serial_not_parallel, tsin->async_not_sync,
@@ -888,8 +887,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- /* TODO uncomment when upstream has taken a reference on this clk */
- /*clk_disable_unprepare(fei->c8sectpfeclk);*/
+ clk_disable_unprepare(fei->c8sectpfeclk);
return ret;
}
@@ -924,11 +922,8 @@ static int c8sectpfe_remove(struct platform_device *pdev)
if (readl(fei->io + SYS_OTHER_CLKEN))
writel(0, fei->io + SYS_OTHER_CLKEN);
- /* TODO uncomment when upstream has taken a reference on this clk */
- /*
if (fei->c8sectpfeclk)
clk_disable_unprepare(fei->c8sectpfeclk);
- */
return 0;
}
@@ -1045,8 +1040,8 @@ static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
*/
dev_dbg(fei->dev,
- "Loading IMEM segment %d 0x%08x\n\t"
- " (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
+ "Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num,
phdr->p_paddr, phdr->p_filesz,
dest, phdr->p_memsz + phdr->p_memsz / 3);
@@ -1075,8 +1070,7 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
*/
dev_dbg(fei->dev,
- "Loading DMEM segment %d 0x%08x\n\t"
- "(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
seg_num, phdr->p_paddr, phdr->p_filesz,
dst, phdr->p_memsz);
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index d341d4994528..68d625b412b6 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -245,7 +245,7 @@ static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
ctx->hw_err = true;
}
- if (hva->lmi_err_reg) {
+ if (hva->emi_err_reg) {
dev_err(dev, "%s external memory interface error: 0x%08x\n",
ctx->name, hva->emi_err_reg);
ctx->hw_err = true;
@@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
/* get memory for registers */
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hva->regs = devm_ioremap_resource(dev, regs);
- if (IS_ERR_OR_NULL(hva->regs)) {
+ if (IS_ERR(hva->regs)) {
dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
return PTR_ERR(hva->regs);
}
/* get memory for esram */
esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (IS_ERR_OR_NULL(esram)) {
+ if (!esram) {
dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
- return PTR_ERR(esram);
+ return -ENODEV;
}
hva->esram_addr = esram->start;
hva->esram_size = resource_size(esram);
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index e236059a60ad..32504b724b5d 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,6 +1,12 @@
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
-
-ti-vpe-y := vpe.o sc.o csc.o vpdma.o
+obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
+obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
+obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
+
+ti-vpe-y := vpe.o
+ti-vpdma-y := vpdma.o
+ti-sc-y := sc.o
+ti-csc-y := csc.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 44323cb5d287..7a058b6e03d0 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -483,11 +483,7 @@ static void cal_get_hwinfo(struct cal_dev *dev)
static inline int cal_runtime_get(struct cal_dev *dev)
{
- int r;
-
- r = pm_runtime_get_sync(&dev->pdev->dev);
-
- return r;
+ return pm_runtime_get_sync(&dev->pdev->dev);
}
static inline void cal_runtime_put(struct cal_dev *dev)
@@ -1749,13 +1745,13 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
}
cleanup_exit:
- if (!remote_ep)
+ if (remote_ep)
of_node_put(remote_ep);
- if (!sensor_node)
+ if (sensor_node)
of_node_put(sensor_node);
- if (!ep_node)
+ if (ep_node)
of_node_put(ep_node);
- if (!port)
+ if (port)
of_node_put(port);
return ret;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index bec674994752..44b8465cf101 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -96,6 +97,8 @@ void csc_dump_regs(struct csc_data *csc)
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
ioread32(csc->base + CSC_##r))
+ dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start);
+
DUMPREG(CSC00);
DUMPREG(CSC01);
DUMPREG(CSC02);
@@ -105,11 +108,13 @@ void csc_dump_regs(struct csc_data *csc)
#undef DUMPREG
}
+EXPORT_SYMBOL(csc_dump_regs);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
{
*csc_reg5 |= CSC_BYPASS;
}
+EXPORT_SYMBOL(csc_set_coeff_bypass);
/*
* set the color space converter coefficient shadow register values
@@ -160,8 +165,9 @@ void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
for (; coeff < end_coeff; coeff += 2)
*shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
}
+EXPORT_SYMBOL(csc_set_coeff);
-struct csc_data *csc_create(struct platform_device *pdev)
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name)
{
struct csc_data *csc;
@@ -176,9 +182,10 @@ struct csc_data *csc_create(struct platform_device *pdev)
csc->pdev = pdev;
csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "csc");
+ res_name);
if (csc->res == NULL) {
- dev_err(&pdev->dev, "missing platform resources data\n");
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
return ERR_PTR(-ENODEV);
}
@@ -190,3 +197,8 @@ struct csc_data *csc_create(struct platform_device *pdev)
return csc;
}
+EXPORT_SYMBOL(csc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Color Space Converter");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index 1ad2b6dad561..024700b15152 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -63,6 +63,6 @@ void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
enum v4l2_colorspace src_colorspace,
enum v4l2_colorspace dst_colorspace);
-struct csc_data *csc_create(struct platform_device *pdev);
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
index f82d1c7f667f..e9273b713782 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -27,6 +28,8 @@ void sc_dump_regs(struct sc_data *sc)
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
ioread32(sc->base + CFG_##r))
+ dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start);
+
DUMPREG(SC0);
DUMPREG(SC1);
DUMPREG(SC2);
@@ -52,6 +55,7 @@ void sc_dump_regs(struct sc_data *sc)
#undef DUMPREG
}
+EXPORT_SYMBOL(sc_dump_regs);
/*
* set the horizontal scaler coefficients according to the ratio of output to
@@ -84,9 +88,6 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
}
}
- if (idx == sc->hs_index)
- return;
-
cp = scaler_hs_coeffs[idx];
for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -101,10 +102,9 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
}
- sc->hs_index = idx;
-
sc->load_coeff_h = true;
}
+EXPORT_SYMBOL(sc_set_hs_coeffs);
/*
* set the vertical scaler coefficients according to the ratio of output to
@@ -130,9 +130,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
idx = VS_LT_9_16_SCALE + sixteenths - 8;
}
- if (idx == sc->vs_index)
- return;
-
cp = scaler_vs_coeffs[idx];
for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -146,9 +143,9 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
}
- sc->vs_index = idx;
sc->load_coeff_v = true;
}
+EXPORT_SYMBOL(sc_set_vs_coeffs);
void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
@@ -276,8 +273,9 @@ void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
*sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
}
+EXPORT_SYMBOL(sc_config_scaler);
-struct sc_data *sc_create(struct platform_device *pdev)
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name)
{
struct sc_data *sc;
@@ -291,9 +289,10 @@ struct sc_data *sc_create(struct platform_device *pdev)
sc->pdev = pdev;
- sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+ sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
if (!sc->res) {
- dev_err(&pdev->dev, "missing platform resources data\n");
+ dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+ res_name);
return ERR_PTR(-ENODEV);
}
@@ -305,3 +304,8 @@ struct sc_data *sc_create(struct platform_device *pdev)
return sc;
}
+EXPORT_SYMBOL(sc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Scaler");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
index 60e411e05c30..f1fe80b38c9f 100644
--- a/drivers/media/platform/ti-vpe/sc.h
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -173,6 +173,12 @@
/* number of taps expected by the scaler in it's coefficient memory */
#define SC_NUM_TAPS_MEM_ALIGN 8
+/* Maximum frame width the scaler can handle (in pixels) */
+#define SC_MAX_PIXEL_WIDTH 2047
+
+/* Maximum frame height the scaler can handle (in lines) */
+#define SC_MAX_PIXEL_HEIGHT 2047
+
/*
* coefficient memory size in bytes:
* num phases x num sets(luma and chroma) x num taps(aligned) x coeff size
@@ -189,9 +195,6 @@ struct sc_data {
bool load_coeff_h; /* have new h SC coeffs */
bool load_coeff_v; /* have new v SC coeffs */
- unsigned int hs_index; /* h SC coeffs selector */
- unsigned int vs_index; /* v SC coeffs selector */
-
struct platform_device *pdev;
};
@@ -203,6 +206,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
unsigned int dst_w, unsigned int dst_h);
-struct sc_data *sc_create(struct platform_device *pdev);
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 3e2e3a33e6ed..13bfd7184160 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -59,9 +59,9 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
- [VPDMA_DATA_FMT_YC422] = {
+ [VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
- .data_type = DATA_TYPE_YC422,
+ .data_type = DATA_TYPE_YCR422,
.depth = 16,
},
[VPDMA_DATA_FMT_YC444] = {
@@ -69,12 +69,23 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_YC444,
.depth = 24,
},
- [VPDMA_DATA_FMT_CY422] = {
+ [VPDMA_DATA_FMT_CRY422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
- .data_type = DATA_TYPE_CY422,
+ .data_type = DATA_TYPE_CRY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_CBY422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+ [VPDMA_DATA_FMT_YCB422] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_YCB422,
.depth = 16,
},
};
+EXPORT_SYMBOL(vpdma_yuv_fmts);
const struct vpdma_data_format vpdma_rgb_fmts[] = {
[VPDMA_DATA_FMT_RGB565] = {
@@ -178,6 +189,30 @@ const struct vpdma_data_format vpdma_rgb_fmts[] = {
.depth = 32,
},
};
+EXPORT_SYMBOL(vpdma_rgb_fmts);
+
+/*
+ * To handle RAW formats we re-use the CBY422 vpdma data type so
+ * that the vpdma re-orders the incoming bytes, as the parser
+ * assumes that the first byte presented on the bus is the MSB of
+ * a 2-byte value.
+ * RAW8 handles from 1 to 8 bits
+ * RAW16 handles from 9 to 16 bits
+ */
+const struct vpdma_data_format vpdma_raw_fmts[] = {
+ [VPDMA_DATA_FMT_RAW8] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 8,
+ },
+ [VPDMA_DATA_FMT_RAW16] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CBY422,
+ .depth = 16,
+ },
+};
+EXPORT_SYMBOL(vpdma_raw_fmts);
const struct vpdma_data_format vpdma_misc_fmts[] = {
[VPDMA_DATA_FMT_MV] = {
@@ -186,6 +221,7 @@ const struct vpdma_data_format vpdma_misc_fmts[] = {
.depth = 4,
},
};
+EXPORT_SYMBOL(vpdma_misc_fmts);
struct vpdma_channel_info {
int num; /* VPDMA channel number */
@@ -317,6 +353,7 @@ void vpdma_dump_regs(struct vpdma_data *vpdma)
DUMPREG(VIP_UP_UV_CSTAT);
DUMPREG(VPI_CTL_CSTAT);
}
+EXPORT_SYMBOL(vpdma_dump_regs);
/*
* Allocate a DMA buffer
@@ -333,6 +370,7 @@ int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
return 0;
}
+EXPORT_SYMBOL(vpdma_alloc_desc_buf);
void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
@@ -341,6 +379,7 @@ void vpdma_free_desc_buf(struct vpdma_buf *buf)
buf->addr = NULL;
buf->size = 0;
}
+EXPORT_SYMBOL(vpdma_free_desc_buf);
/*
* map descriptor/payload DMA buffer, enabling DMA access
@@ -351,7 +390,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
WARN_ON(buf->mapped);
buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, buf->dma_addr)) {
dev_err(dev, "failed to map buffer\n");
return -EINVAL;
@@ -361,6 +400,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
return 0;
}
+EXPORT_SYMBOL(vpdma_map_desc_buf);
/*
* unmap descriptor/payload DMA buffer, disabling DMA access and
@@ -371,10 +411,62 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
struct device *dev = &vpdma->pdev->dev;
if (buf->mapped)
- dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, buf->dma_addr, buf->size,
+ DMA_BIDIRECTIONAL);
buf->mapped = false;
}
+EXPORT_SYMBOL(vpdma_unmap_desc_buf);
+
+/*
+ * Clean up all pending descriptors of a list.
+ * First, stop the list currently being processed; if the VPDMA was
+ * busy, this step lets it accept newly posted lists again.
+ * To clean up the internal FSM, post an abort-channel descriptor for
+ * each of the channels in the @channels array of size @size.
+ */
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size)
+{
+ struct vpdma_desc_list abort_list;
+ int i, ret, timeout = 500;
+
+ write_reg(vpdma, VPDMA_LIST_ATTR,
+ (list_num << VPDMA_LIST_NUM_SHFT) |
+ (1 << VPDMA_LIST_STOP_SHFT));
+
+ if (size <= 0 || !channels)
+ return 0;
+
+ ret = vpdma_create_desc_list(&abort_list,
+ size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ vpdma_add_abort_channel_ctd(&abort_list, channels[i]);
+
+ ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
+ if (ret)
+ return ret;
+ ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
+ if (ret)
+ return ret;
+
+ while (vpdma_list_busy(vpdma, list_num) && timeout--)
+ ;
+
+ if (timeout == 0) {
+ dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
+ return -EBUSY;
+ }
+
+ vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+ vpdma_free_desc_buf(&abort_list.buf);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpdma_list_cleanup);
/*
* create a descriptor list, the user of this list will append configuration,
@@ -396,6 +488,7 @@ int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
return 0;
}
+EXPORT_SYMBOL(vpdma_create_desc_list);
/*
* once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
@@ -405,6 +498,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
list->next = list->buf.addr;
}
+EXPORT_SYMBOL(vpdma_reset_desc_list);
/*
* free the buffer allocated for the VPDMA descriptor list, this should be
@@ -416,20 +510,22 @@ void vpdma_free_desc_list(struct vpdma_desc_list *list)
list->next = NULL;
}
+EXPORT_SYMBOL(vpdma_free_desc_list);
-static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
+EXPORT_SYMBOL(vpdma_list_busy);
/*
* submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
*/
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+int vpdma_submit_descs(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, int list_num)
{
- /* we always use the first list */
- int list_num = 0;
int list_size;
+ unsigned long flags;
if (vpdma_list_busy(vpdma, list_num))
return -EBUSY;
@@ -437,15 +533,68 @@ int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
/* 16-byte granularity */
list_size = (list->next - list->buf.addr) >> 4;
+ spin_lock_irqsave(&vpdma->lock, flags);
write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
write_reg(vpdma, VPDMA_LIST_ATTR,
(list_num << VPDMA_LIST_NUM_SHFT) |
(list->type << VPDMA_LIST_TYPE_SHFT) |
list_size);
+ spin_unlock_irqrestore(&vpdma->lock, flags);
return 0;
}
+EXPORT_SYMBOL(vpdma_submit_descs);
+
+static void dump_dtd(struct vpdma_dtd *dtd);
+
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx)
+{
+ struct vpdma_dtd *dtd = list->buf.addr;
+ dma_addr_t write_desc_addr;
+ int offset;
+
+ dtd += idx;
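+ /* Unmap so the CPU can patch the descriptor, then re-map below */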
+ vpdma_unmap_desc_buf(vpdma, &list->buf);
+
+ dtd->start_addr = dma_addr;
+
+ /*
+ * Calculate the write address from the offset of write_dtd from
+ * the start of list->buf.
+ */
+ offset = (void *)write_dtd - list->buf.addr;
+ write_desc_addr = list->buf.dma_addr + offset;
+
+ if (drop)
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 1, 0);
+ else
+ dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+ 1, 0, 0);
+
+ vpdma_map_desc_buf(vpdma, &list->buf);
+
+ dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_update_dma_addr);
+
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height)
+{
+ if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
+ reg_addr != VPDMA_MAX_SIZE3)
+ reg_addr = VPDMA_MAX_SIZE1;
+
+ write_field_reg(vpdma, reg_addr, width - 1,
+ VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
+
+ write_field_reg(vpdma, reg_addr, height - 1,
+ VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
+
+}
+EXPORT_SYMBOL(vpdma_set_max_size);
static void dump_cfd(struct vpdma_cfd *cfd)
{
@@ -466,10 +615,10 @@ static void dump_cfd(struct vpdma_cfd *cfd)
pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
- pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
- "payload_len = %d\n", cfd_get_pkt_type(cfd),
- cfd_get_direct(cfd), class, cfd_get_dest(cfd),
- cfd_get_payload_len(cfd));
+ pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
+ cfd_get_pkt_type(cfd),
+ cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+ cfd_get_payload_len(cfd));
}
/*
@@ -498,6 +647,7 @@ void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
dump_cfd(cfd);
}
+EXPORT_SYMBOL(vpdma_add_cfd_block);
/*
* append a configuration descriptor to the given descriptor list, where the
@@ -526,6 +676,7 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
dump_cfd(cfd);
};
+EXPORT_SYMBOL(vpdma_add_cfd_adb);
/*
* control descriptor format change based on what type of control descriptor it
@@ -563,6 +714,32 @@ void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
dump_ctd(ctd);
}
+EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
+
+/*
+ * append an 'abort_channel' type control descriptor to the given descriptor
+ * list, this descriptor aborts any DMA transaction happening using the
+ * specified channel
+ */
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num)
+{
+ struct vpdma_ctd *ctd;
+
+ ctd = list->next;
+ WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+ ctd->w0 = 0;
+ ctd->w1 = 0;
+ ctd->w2 = 0;
+ ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
+ CTD_TYPE_ABORT_CHANNEL);
+
+ list->next = ctd + 1;
+
+ dump_ctd(ctd);
+}
+EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
static void dump_dtd(struct vpdma_dtd *dtd)
{
@@ -574,8 +751,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("%s data transfer descriptor for channel %d\n",
dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
- pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
- "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+ pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
@@ -586,17 +762,16 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
- pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
- "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
- dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
- dtd_get_next_chan(dtd));
+ pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
+ dtd_get_pkt_type(dtd),
+ dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+ dtd_get_next_chan(dtd));
if (dir == DTD_DIR_IN)
pr_debug("word4: frame_width = %d, frame_height = %d\n",
dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
else
- pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
- "drp_data = %d, use_desc_reg = %d\n",
+ pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
@@ -620,13 +795,25 @@ static void dump_dtd(struct vpdma_dtd *dtd)
* @c_rect: compose params of output image
* @fmt: vpdma data format of the buffer
* dma_addr: dma address as seen by VPDMA
+ * max_w: enum for maximum width of data transfer
+ * max_h: enum for maximum height of data transfer
* chan: VPDMA channel
* flags: VPDMA flags to configure some descriptor fields
*/
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
- enum vpdma_channel chan, u32 flags)
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags)
+{
+ vpdma_rawchan_add_out_dtd(list, width, c_rect, fmt, dma_addr,
+ max_w, max_h, chan_info[chan].num, flags);
+}
+EXPORT_SYMBOL(vpdma_add_out_dtd);
+
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
int priority = 0;
int field = 0;
@@ -637,7 +824,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
int stride;
struct vpdma_dtd *dtd;
- channel = next_chan = chan_info[chan].num;
+ channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
fmt->data_type == DATA_TYPE_C420) {
@@ -665,8 +852,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
DTD_DIR_OUT, channel, priority, next_chan);
dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
- dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
- MAX_OUT_HEIGHT_1080);
+ dtd->max_width_height = dtd_max_width_height(max_w, max_h);
dtd->client_attr0 = 0;
dtd->client_attr1 = 0;
@@ -674,6 +860,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
dump_dtd(dtd);
}
+EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
/*
* append an inbound data transfer descriptor to the given descriptor list,
@@ -747,27 +934,105 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
dump_dtd(dtd);
}
+EXPORT_SYMBOL(vpdma_add_in_dtd);
+
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
+{
+ int i, list_num = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ for (i = 0; i < VPDMA_MAX_NUM_LIST &&
+ vpdma->hwlist_used[i] == true; i++)
+ ;
+
+ if (i < VPDMA_MAX_NUM_LIST) {
+ list_num = i;
+ vpdma->hwlist_used[i] = true;
+ vpdma->hwlist_priv[i] = priv;
+ }
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return list_num;
+}
+EXPORT_SYMBOL(vpdma_hwlist_alloc);
+
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
+{
+ if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
+ return NULL;
+
+ return vpdma->hwlist_priv[list_num];
+}
+EXPORT_SYMBOL(vpdma_hwlist_get_priv);
+
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
+{
+ void *priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpdma->lock, flags);
+ vpdma->hwlist_used[list_num] = false;
+ priv = vpdma->hwlist_priv[list_num];
+ spin_unlock_irqrestore(&vpdma->lock, flags);
+
+ return priv;
+}
+EXPORT_SYMBOL(vpdma_hwlist_release);
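A minimal usage sketch, not part of the patch, for the hardware-list helpers above: allocate a list for a stream, retrieve the stored driver context later (for example in an interrupt handler), and release the list when the stream stops. 'struct my_ctx', 'ctx' and 'vpdma' are assumptions of the example.

    /* Sketch: lifetime of a VPDMA hardware list. */
    int list_num = vpdma_hwlist_alloc(vpdma, ctx);

    if (list_num < 0)
        return -EBUSY;    /* all VPDMA_MAX_NUM_LIST lists are in use */

    /* later, e.g. in the list-complete interrupt handler */
    struct my_ctx *cur = vpdma_hwlist_get_priv(vpdma, list_num);

    /* when the stream stops */
    vpdma_hwlist_release(vpdma, list_num);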
/* set or clear the mask for list complete interrupt */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable)
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable)
{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
u32 val;
- val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+ val = read_reg(vpdma, reg_addr);
if (enable)
val |= (1 << (list_num * 2));
else
val &= ~(1 << (list_num * 2));
- write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+ write_reg(vpdma, reg_addr, val);
}
+EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
+
+/* get the LIST_STAT register */
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_stat);
+
+/* get the LIST_MASK register */
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+
+ return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_mask);
/* clear previously occurred list interrupts in the LIST_STAT register */
-void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num)
+{
+ u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+ write_reg(vpdma, reg_addr, 3 << (list_num * 2));
+}
+EXPORT_SYMBOL(vpdma_clear_list_stat);
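A short worked example, not part of the patch, of the register banking implied by the new irq_num parameter, assuming the VPDMA_INT_LIST0_STAT (0x88), VPDMA_INT_LIST0_MASK (0x8c) and VPDMA_INTX_OFFSET (0x50) values defined in the vpdma_priv.h hunk later in this diff:

    /* irq_num 0: LIST_STAT at 0x88,        LIST_MASK at 0x8c
     * irq_num 1: LIST_STAT at 0x88 + 0x50, LIST_MASK at 0x8c + 0x50
     * Each list owns two bits in these registers, hence the
     * (list_num * 2) shifts used above. */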
+
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color)
{
- write_reg(vpdma, VPDMA_INT_LIST0_STAT,
- read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+ if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
+ write_reg(vpdma, VPDMA_BG_RGB, color);
+ else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
+ write_reg(vpdma, VPDMA_BG_YUV, color);
}
+EXPORT_SYMBOL(vpdma_set_bg_color);
/*
* configures the output mode of the line buffer for the given client, the
@@ -782,6 +1047,7 @@ void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
write_field_reg(vpdma, client_cstat, line_mode,
VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
+EXPORT_SYMBOL(vpdma_set_line_mode);
/*
* configures the event which should trigger VPDMA transfer for the given
@@ -796,6 +1062,7 @@ void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
write_field_reg(vpdma, client_cstat, fs_event,
VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
+EXPORT_SYMBOL(vpdma_set_frame_start_event);
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
@@ -871,42 +1138,40 @@ static int vpdma_load_firmware(struct vpdma_data *vpdma)
return 0;
}
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
void (*cb)(struct platform_device *pdev))
{
struct resource *res;
- struct vpdma_data *vpdma;
int r;
dev_dbg(&pdev->dev, "vpdma_create\n");
- vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
- if (!vpdma) {
- dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
- return ERR_PTR(-ENOMEM);
- }
-
vpdma->pdev = pdev;
vpdma->cb = cb;
+ spin_lock_init(&vpdma->lock);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
if (res == NULL) {
dev_err(&pdev->dev, "missing platform resources data\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!vpdma->base) {
dev_err(&pdev->dev, "failed to ioremap\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
r = vpdma_load_firmware(vpdma);
if (r) {
pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
- return ERR_PTR(r);
+ return r;
}
- return vpdma;
+ return 0;
}
+EXPORT_SYMBOL(vpdma_create);
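The refactored create path makes the caller own the vpdma_data storage instead of receiving a devm-allocated pointer. A hedged sketch of the new calling convention, mirroring the vpe.c hunk later in this diff; 'struct my_dev', 'my_probe_vpdma' and 'my_fw_cb' are assumptions of the example.

    /* Sketch: embed struct vpdma_data in the client driver's device
     * structure and check an int return instead of IS_ERR(). */
    struct my_dev {
        struct vpdma_data vpdma_data;
        struct vpdma_data *vpdma;
    };

    static int my_probe_vpdma(struct platform_device *pdev, struct my_dev *dev)
    {
        dev->vpdma = &dev->vpdma_data;
        /* my_fw_cb() is the caller's firmware-loaded callback */
        return vpdma_create(pdev, dev->vpdma, my_fw_cb);
    }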
+
+MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 2bd8fb050381..131700c112b2 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -13,6 +13,7 @@
#ifndef __TI_VPDMA_H_
#define __TI_VPDMA_H_
+#define VPDMA_MAX_NUM_LIST 8
/*
* A vpdma_buf tracks the size, DMA address and mapping status of each
* driver DMA area.
@@ -35,6 +36,9 @@ struct vpdma_data {
struct platform_device *pdev;
+ spinlock_t lock;
+ bool hwlist_used[VPDMA_MAX_NUM_LIST];
+ void *hwlist_priv[VPDMA_MAX_NUM_LIST];
/* callback to VPE driver when the firmware is loaded */
void (*cb)(struct platform_device *pdev);
};
@@ -70,9 +74,11 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
- VPDMA_DATA_FMT_YC422,
+ VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
- VPDMA_DATA_FMT_CY422,
+ VPDMA_DATA_FMT_CRY422,
+ VPDMA_DATA_FMT_CBY422,
+ VPDMA_DATA_FMT_YCB422,
};
enum vpdma_rgb_formats {
@@ -98,12 +104,18 @@ enum vpdma_rgb_formats {
VPDMA_DATA_FMT_BGRA32,
};
+enum vpdma_raw_formats {
+ VPDMA_DATA_FMT_RAW8 = 0,
+ VPDMA_DATA_FMT_RAW16,
+};
+
enum vpdma_misc_formats {
VPDMA_DATA_FMT_MV = 0,
};
extern const struct vpdma_data_format vpdma_yuv_fmts[];
extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_raw_fmts[];
extern const struct vpdma_data_format vpdma_misc_fmts[];
enum vpdma_frame_start_event {
@@ -117,6 +129,30 @@ enum vpdma_frame_start_event {
VPDMA_FSEVENT_CHANNEL_ACTIVE,
};
+/* max width configurations */
+enum vpdma_max_width {
+ MAX_OUT_WIDTH_UNLIMITED = 0,
+ MAX_OUT_WIDTH_REG1,
+ MAX_OUT_WIDTH_REG2,
+ MAX_OUT_WIDTH_REG3,
+ MAX_OUT_WIDTH_352,
+ MAX_OUT_WIDTH_768,
+ MAX_OUT_WIDTH_1280,
+ MAX_OUT_WIDTH_1920,
+};
+
+/* max height configurations */
+enum vpdma_max_height {
+ MAX_OUT_HEIGHT_UNLIMITED = 0,
+ MAX_OUT_HEIGHT_REG1,
+ MAX_OUT_HEIGHT_REG2,
+ MAX_OUT_HEIGHT_REG3,
+ MAX_OUT_HEIGHT_288,
+ MAX_OUT_HEIGHT_576,
+ MAX_OUT_HEIGHT_720,
+ MAX_OUT_HEIGHT_1080,
+};
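For illustration only, how these enums pair with vpdma_set_max_size() in the out-DTD path; this mirrors the vpe.c hunk later in this diff, where MAX_W/MAX_H are 2048 and 1184:

    /* The MAX_SIZE register holds the actual limit; the DTD enum only
     * selects which register (or fixed limit) applies. */
    vpdma_set_max_size(vpdma, VPDMA_MAX_SIZE1, 2048, 1184);

    vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
                      vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
                      MAX_OUT_HEIGHT_REG1, p_data->channel, flags);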
+
/*
* VPDMA channel numbers
*/
@@ -134,6 +170,13 @@ enum vpdma_channel {
VPE_CHAN_RGB_OUT,
};
+#define VIP_CHAN_VIP2_OFFSET 70
+#define VIP_CHAN_MULT_PORTB_OFFSET 16
+#define VIP_CHAN_YUV_PORTB_OFFSET 2
+#define VIP_CHAN_RGB_PORTB_OFFSET 1
+
+#define VPDMA_MAX_CHANNELS 256
+
/* flags for VPDMA data descriptors */
#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
@@ -177,7 +220,17 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
void vpdma_reset_desc_list(struct vpdma_desc_list *list);
void vpdma_free_desc_list(struct vpdma_desc_list *list);
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
+ int list_num);
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+ struct vpdma_desc_list *list, dma_addr_t dma_addr,
+ void *write_dtd, int drop, int idx);
+
+/* VPDMA hardware list funcs */
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv);
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num);
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num);
/* helpers for creating vpdma descriptors */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
@@ -186,31 +239,47 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
struct vpdma_buf *adb);
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
enum vpdma_channel chan);
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+ int chan_num);
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
- enum vpdma_channel chan, u32 flags);
+ int max_w, int max_h, enum vpdma_channel chan, u32 flags);
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+ const struct v4l2_rect *c_rect,
+ const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+ int max_w, int max_h, int raw_vpdma_chan, u32 flags);
+
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, int field, u32 flags, int frame_width,
int frame_height, int start_h, int start_v);
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+ int *channels, int size);
/* vpdma list interrupt management */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
- bool enable);
-void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+ int list_num, bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+ int list_num);
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num);
/* vpdma client configuration */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
enum vpdma_channel chan);
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+ u32 width, u32 height);
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+ struct vpdma_data_format *fmt, u32 color);
void vpdma_dump_regs(struct vpdma_data *vpdma);
/* initialize vpdma, passed with VPE's platform device pointer */
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
void (*cb)(struct platform_device *pdev));
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c1a6ce1884f3..72c7f13b4a9d 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -28,6 +28,10 @@
#define VPDMA_MAX_SIZE1 0x34
#define VPDMA_MAX_SIZE2 0x38
#define VPDMA_MAX_SIZE3 0x3c
+#define VPDMA_MAX_SIZE_WIDTH_MASK 0xffff
+#define VPDMA_MAX_SIZE_WIDTH_SHFT 16
+#define VPDMA_MAX_SIZE_HEIGHT_MASK 0xffff
+#define VPDMA_MAX_SIZE_HEIGHT_SHFT 0
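Judging from the mask/shift definitions above, a MAX_SIZE register packs the width into the upper 16 bits and the height into the lower 16. A worked example, not part of the patch, using the 2048x1184 limits that vpe.c programs later in this diff:

    /* (2048 << VPDMA_MAX_SIZE_WIDTH_SHFT) | (1184 << VPDMA_MAX_SIZE_HEIGHT_SHFT)
     *   = 0x08000000 | 0x000004a0 = 0x080004a0, written to VPDMA_MAX_SIZE1 (0x34) */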
/* Interrupts */
#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8)
@@ -39,9 +43,11 @@
#define VPDMA_INT_LIST0_STAT 0x88
#define VPDMA_INT_LIST0_MASK 0x8c
+#define VPDMA_INTX_OFFSET 0x50
+
#define VPDMA_PERFMON(i) (0x200 + i * 4)
-/* VPE specific client registers */
+/* VIP/VPE client registers */
#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
#define VPDMA_DEI_LUMA1_CSTAT 0x0304
#define VPDMA_DEI_LUMA2_CSTAT 0x0308
@@ -50,6 +56,8 @@
#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
#define VPDMA_DEI_MV_IN_CSTAT 0x0330
#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
+#define VPDMA_VIP_LO_Y_CSTAT 0x0388
+#define VPDMA_VIP_LO_UV_CSTAT 0x038c
#define VPDMA_VIP_UP_Y_CSTAT 0x0390
#define VPDMA_VIP_UP_UV_CSTAT 0x0394
#define VPDMA_VPI_CTL_CSTAT 0x03d0
@@ -69,41 +77,63 @@
#define VPDMA_LIST_TYPE_SHFT 16
#define VPDMA_LIST_SIZE_MASK 0xffff
-/* VPDMA data type values for data formats */
+/*
+ * The YUV data type definitions below are taken from
+ * both the TRM and the i839 Errata information.
+ * Use the correct data type, taking into account the byte
+ * reordering of components.
+ *
+ * Also, since "C" in the 422 case was previously used on its
+ * own to mean "Cr" (i.e. the V component), the labels now say
+ * CR explicitly to remove any confusion.
+ * Bear in mind that the type labels refer to the memory
+ * packed order (LSB - MSB).
+ */
#define DATA_TYPE_Y444 0x0
#define DATA_TYPE_Y422 0x1
#define DATA_TYPE_Y420 0x2
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
-#define DATA_TYPE_YC422 0x7
#define DATA_TYPE_YC444 0x8
-#define DATA_TYPE_CY422 0x27
-
-#define DATA_TYPE_RGB16_565 0x0
-#define DATA_TYPE_ARGB_1555 0x1
-#define DATA_TYPE_ARGB_4444 0x2
-#define DATA_TYPE_RGBA_5551 0x3
-#define DATA_TYPE_RGBA_4444 0x4
-#define DATA_TYPE_ARGB24_6666 0x5
-#define DATA_TYPE_RGB24_888 0x6
-#define DATA_TYPE_ARGB32_8888 0x7
-#define DATA_TYPE_RGBA24_6666 0x8
-#define DATA_TYPE_RGBA32_8888 0x9
-#define DATA_TYPE_BGR16_565 0x10
-#define DATA_TYPE_ABGR_1555 0x11
-#define DATA_TYPE_ABGR_4444 0x12
-#define DATA_TYPE_BGRA_5551 0x13
-#define DATA_TYPE_BGRA_4444 0x14
-#define DATA_TYPE_ABGR24_6666 0x15
-#define DATA_TYPE_BGR24_888 0x16
-#define DATA_TYPE_ABGR32_8888 0x17
-#define DATA_TYPE_BGRA24_6666 0x18
-#define DATA_TYPE_BGRA32_8888 0x19
+#define DATA_TYPE_YCB422 0x7
+#define DATA_TYPE_YCR422 0x17
+#define DATA_TYPE_CBY422 0x27
+#define DATA_TYPE_CRY422 0x37
+
+/*
+ * The RGB data type definitions below are defined
+ * to follow Errata i819.
+ * The initial values were taken from:
+ * VPDMA_data_type_mapping_v0.2vayu_c.pdf
+ * However, some of the ARGB definitions in that document also
+ * appeared to be wrong, as they would yield RGBA instead.
+ * They have been corrected based on experimentation.
+ */
+#define DATA_TYPE_RGB16_565 0x10
+#define DATA_TYPE_ARGB_1555 0x13
+#define DATA_TYPE_ARGB_4444 0x14
+#define DATA_TYPE_RGBA_5551 0x11
+#define DATA_TYPE_RGBA_4444 0x12
+#define DATA_TYPE_ARGB24_6666 0x18
+#define DATA_TYPE_RGB24_888 0x16
+#define DATA_TYPE_ARGB32_8888 0x17
+#define DATA_TYPE_RGBA24_6666 0x15
+#define DATA_TYPE_RGBA32_8888 0x19
+#define DATA_TYPE_BGR16_565 0x0
+#define DATA_TYPE_ABGR_1555 0x3
+#define DATA_TYPE_ABGR_4444 0x4
+#define DATA_TYPE_BGRA_5551 0x1
+#define DATA_TYPE_BGRA_4444 0x2
+#define DATA_TYPE_ABGR24_6666 0x8
+#define DATA_TYPE_BGR24_888 0x6
+#define DATA_TYPE_ABGR32_8888 0x7
+#define DATA_TYPE_BGRA24_6666 0x5
+#define DATA_TYPE_BGRA32_8888 0x9
#define DATA_TYPE_MV 0x3
-/* VPDMA channel numbers(only VPE channels for now) */
+/* VPDMA channel numbers, some are common between VIP/VPE and appear twice */
#define VPE_CHAN_NUM_LUMA1_IN 0
#define VPE_CHAN_NUM_CHROMA1_IN 1
#define VPE_CHAN_NUM_LUMA2_IN 2
@@ -112,10 +142,15 @@
#define VPE_CHAN_NUM_CHROMA3_IN 5
#define VPE_CHAN_NUM_MV_IN 12
#define VPE_CHAN_NUM_MV_OUT 15
+#define VIP1_CHAN_NUM_MULT_PORT_A_SRC0 38
+#define VIP1_CHAN_NUM_MULT_ANC_A_SRC0 70
#define VPE_CHAN_NUM_LUMA_OUT 102
#define VPE_CHAN_NUM_CHROMA_OUT 103
+#define VIP1_CHAN_NUM_PORT_A_LUMA 102
+#define VIP1_CHAN_NUM_PORT_A_CHROMA 103
#define VPE_CHAN_NUM_RGB_OUT 106
-
+#define VIP1_CHAN_NUM_PORT_A_RGB 106
+#define VIP1_CHAN_NUM_PORT_B_RGB 107
/*
* a VPDMA address data block payload for a configuration descriptor needs to
* have each sub block length as a multiple of 16 bytes. Therefore, the overall
@@ -203,6 +238,7 @@ struct vpdma_dtd {
#define DTD_V_START_MASK 0xffff
#define DTD_V_START_SHFT 0
+#define DTD_DESC_START_MASK 0xffffffe0
#define DTD_DESC_START_SHIFT 5
#define DTD_WRITE_DESC_MASK 0x01
#define DTD_WRITE_DESC_SHIFT 2
@@ -217,42 +253,6 @@ struct vpdma_dtd {
#define DTD_MAX_HEIGHT_MASK 0x07
#define DTD_MAX_HEIGHT_SHFT 0
-/* max width configurations */
- /* unlimited width */
-#define MAX_OUT_WIDTH_UNLIMITED 0
-/* as specified in max_size1 reg */
-#define MAX_OUT_WIDTH_REG1 1
-/* as specified in max_size2 reg */
-#define MAX_OUT_WIDTH_REG2 2
-/* as specified in max_size3 reg */
-#define MAX_OUT_WIDTH_REG3 3
-/* maximum of 352 pixels as width */
-#define MAX_OUT_WIDTH_352 4
-/* maximum of 768 pixels as width */
-#define MAX_OUT_WIDTH_768 5
-/* maximum of 1280 pixels width */
-#define MAX_OUT_WIDTH_1280 6
-/* maximum of 1920 pixels as width */
-#define MAX_OUT_WIDTH_1920 7
-
-/* max height configurations */
- /* unlimited height */
-#define MAX_OUT_HEIGHT_UNLIMITED 0
-/* as specified in max_size1 reg */
-#define MAX_OUT_HEIGHT_REG1 1
-/* as specified in max_size2 reg */
-#define MAX_OUT_HEIGHT_REG2 2
-/* as specified in max_size3 reg */
-#define MAX_OUT_HEIGHT_REG3 3
-/* maximum of 288 lines as height */
-#define MAX_OUT_HEIGHT_288 4
-/* maximum of 576 lines as height */
-#define MAX_OUT_HEIGHT_576 5
-/* maximum of 720 lines as height */
-#define MAX_OUT_HEIGHT_720 6
-/* maximum of 1080 lines as height */
-#define MAX_OUT_HEIGHT_1080 7
-
static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
bool one_d, bool even_line_skip, bool odd_line_skip,
int line_stride)
@@ -285,7 +285,7 @@ static inline u32 dtd_frame_width_height(int width, int height)
static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
bool drop_data, bool use_desc)
{
- return (addr << DTD_DESC_START_SHIFT) |
+ return (addr & DTD_DESC_START_MASK) |
(write_desc << DTD_WRITE_DESC_SHIFT) |
(drop_data << DTD_DROP_DATA_SHIFT) |
use_desc;
@@ -390,7 +390,7 @@ static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
{
- return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
+ return dtd->desc_write_addr & DTD_DESC_START_MASK;
}
static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 0189f7f7cb03..f0156b7759e9 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -44,6 +44,7 @@
#include <media/videobuf2-dma-contig.h>
#include "vpdma.h"
+#include "vpdma_priv.h"
#include "vpe_regs.h"
#include "sc.h"
#include "csc.h"
@@ -53,8 +54,8 @@
/* minimum and maximum frame sizes */
#define MIN_W 32
#define MIN_H 32
-#define MAX_W 1920
-#define MAX_H 1080
+#define MAX_W 2048
+#define MAX_H 1184
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -141,7 +142,7 @@ struct vpe_dei_regs {
*/
static const struct vpe_dei_regs dei_regs = {
.mdt_spacial_freq_thr_reg = 0x020C0804u,
- .edi_config_reg = 0x0118100Fu,
+ .edi_config_reg = 0x0118100Cu,
.edi_lut_reg0 = 0x08040200u,
.edi_lut_reg1 = 0x1010100Cu,
.edi_lut_reg2 = 0x10101010u,
@@ -236,7 +237,7 @@ struct vpe_fmt {
static struct vpe_fmt vpe_formats[] = {
{
- .name = "YUV 422 co-planar",
+ .name = "NV16 YUV 422 co-planar",
.fourcc = V4L2_PIX_FMT_NV16,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -245,7 +246,7 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "YUV 420 co-planar",
+ .name = "NV12 YUV 420 co-planar",
.fourcc = V4L2_PIX_FMT_NV12,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -258,7 +259,7 @@ static struct vpe_fmt vpe_formats[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
- .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
},
},
{
@@ -266,7 +267,7 @@ static struct vpe_fmt vpe_formats[] = {
.fourcc = V4L2_PIX_FMT_UYVY,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
- .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
},
},
{
@@ -301,6 +302,22 @@ static struct vpe_fmt vpe_formats[] = {
.vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
},
},
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
+ },
+ },
+ {
+ .name = "RGB5551",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .types = VPE_FMT_TYPE_CAPTURE,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
+ },
+ },
};
/*
@@ -310,6 +327,7 @@ static struct vpe_fmt vpe_formats[] = {
struct vpe_q_data {
unsigned int width; /* frame width */
unsigned int height; /* frame height */
+ unsigned int nplanes; /* Current number of planes */
unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
enum v4l2_colorspace colorspace;
enum v4l2_field field; /* supported field value */
@@ -320,9 +338,13 @@ struct vpe_q_data {
};
/* vpe_q_data flag bits */
-#define Q_DATA_FRAME_1D (1 << 0)
-#define Q_DATA_MODE_TILED (1 << 1)
-#define Q_DATA_INTERLACED (1 << 2)
+#define Q_DATA_FRAME_1D BIT(0)
+#define Q_DATA_MODE_TILED BIT(1)
+#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
+#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+
+#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
+ Q_DATA_INTERLACED_SEQ_TB)
enum {
Q_DATA_SRC = 0,
@@ -362,6 +384,7 @@ struct vpe_dev {
void __iomem *base;
struct resource *res;
+ struct vpdma_data vpdma_data;
struct vpdma_data *vpdma; /* vpdma data handle */
struct sc_data *sc; /* scaler data handle */
struct csc_data *csc; /* csc data handle */
@@ -416,7 +439,7 @@ static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &ctx->q_data[Q_DATA_DST];
default:
- BUG();
+ return NULL;
}
return NULL;
}
@@ -584,7 +607,10 @@ static void free_vbs(struct vpe_ctx *ctx)
spin_lock_irqsave(&dev->lock, flags);
if (ctx->src_vbs[2]) {
v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
- v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
}
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -638,7 +664,7 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
cp = &us_coeffs[0].anchor_fid0_c0;
- if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
+ if (s_q_data->flags & Q_IS_INTERLACED) /* interlaced */
cp += sizeof(us_coeffs[0]) / sizeof(*cp);
end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
@@ -655,14 +681,13 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
/*
* Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
*/
-static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
+static void set_cfg_modes(struct vpe_ctx *ctx)
{
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
u32 *us1_reg0 = &mmr_adb->us1_regs[0];
u32 *us2_reg0 = &mmr_adb->us2_regs[0];
u32 *us3_reg0 = &mmr_adb->us3_regs[0];
- int line_mode = 1;
int cfg_mode = 1;
/*
@@ -670,15 +695,24 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
cfg_mode = 0;
- line_mode = 0; /* double lines to line buffer */
- }
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
+ ctx->load_mmrs = true;
+}
+
+static void set_line_modes(struct vpe_ctx *ctx)
+{
+ struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+ int line_mode = 1;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ line_mode = 0; /* double lines to line buffer */
+
/* regs for now */
vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
@@ -703,8 +737,6 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
/* frame start for MV in client */
vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
VPE_CHAN_MV_IN);
-
- ctx->load_mmrs = true;
}
/*
@@ -727,9 +759,11 @@ static void set_dst_registers(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB)
+ if (clrspc == V4L2_COLORSPACE_SRGB) {
val |= VPE_RGB_OUT_SELECT;
- else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+ vpdma_set_bg_color(ctx->dev->vpdma,
+ (struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
+ } else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
val |= VPE_COLOR_SEPARATE_422;
/*
@@ -765,8 +799,7 @@ static void set_dei_regs(struct vpe_ctx *ctx)
* for both progressive and interlace content in interlace bypass mode.
* It has been recommended not to use progressive bypass mode.
*/
- if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
- !(s_q_data->flags & Q_DATA_INTERLACED)) {
+ if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
deinterlace = false;
val = VPE_DEI_INTERLACE_BYPASS;
}
@@ -798,6 +831,23 @@ static void set_dei_shadow_registers(struct vpe_ctx *ctx)
ctx->load_mmrs = true;
}
+static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
+{
+ struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+ u32 *edi_config_reg = &mmr_adb->dei_regs[3];
+
+ if (mode & 0x2)
+ write_field(edi_config_reg, 1, 1, 2); /* EDI_ENABLE_3D */
+
+ if (mode & 0x3)
+ write_field(edi_config_reg, 1, 1, 3); /* EDI_CHROMA_3D */
+
+ write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
+ VPE_EDI_INP_MODE_SHIFT);
+
+ ctx->load_mmrs = true;
+}
+
/*
* Set the shadow registers whose values are modified when either the
* source or destination format is changed.
@@ -817,8 +867,8 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
- if ((s_q_data->flags & Q_DATA_INTERLACED) &&
- !(d_q_data->flags & Q_DATA_INTERLACED)) {
+ if ((s_q_data->flags & Q_IS_INTERLACED) &&
+ !(d_q_data->flags & Q_IS_INTERLACED)) {
int bytes_per_line;
const struct vpdma_data_format *mv =
&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -842,12 +892,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
free_vbs(ctx);
+ ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
ret = realloc_mv_buffers(ctx, mv_buf_size);
if (ret)
return ret;
- set_cfg_and_line_modes(ctx);
+ set_cfg_modes(ctx);
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
@@ -881,15 +932,14 @@ static struct vpe_ctx *file2ctx(struct file *file)
static int job_ready(void *priv)
{
struct vpe_ctx *ctx = priv;
- int needed = ctx->bufs_per_job;
-
- if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
- needed += 2; /* need additional two most recent fields */
-
- if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
- return 0;
- if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
+ /*
+ * This check is needed as this might be called directly from the
+ * driver. When called by the m2m framework it will always be
+ * satisfied, but when called from vpe_irq it might fail
+ * (e.g. a source stream with zero buffers).
+ */
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
return 0;
return 1;
@@ -993,22 +1043,38 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
int mv_buf_selector = !ctx->src_mv_buf_selector;
dma_addr_t dma_addr;
u32 flags = 0;
+ u32 offset = 0;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ q_data = &ctx->q_data[Q_DATA_SRC];
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
vpdma_fmt = fmt->vpdma_fmt[plane];
- dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /*
+ * If we are using a single plane buffer, the chroma data follows
+ * the luma data in the same buffer, so compute the offset of the
+ * chroma data for the separate vpdma chroma channel.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
if (!dma_addr) {
vpe_err(ctx->dev,
"acquiring output buffer(%d) dma_addr failed\n",
port);
return;
}
+ /* Apply the offset */
+ dma_addr += offset;
}
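A worked example, with assumed numbers rather than values from the patch, of the chroma offset computed above for a single-plane co-planar buffer:

    /* Single-plane NV12, bytesperline[0] = 1920, height = 1080:
     * the luma plane occupies bytes [0, 1920*1080), so the chroma data
     * in the same buffer starts at offset = 1920 * 1080 = 2073600,
     * which is added to the plane-0 dma address for the chroma channel. */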
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1016,8 +1082,12 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
if (q_data->flags & Q_DATA_MODE_TILED)
flags |= VPDMA_DATA_MODE_TILED;
+ vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
+ MAX_W, MAX_H);
+
vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, flags);
+ vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
+ MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
static void add_in_dtd(struct vpe_ctx *ctx, int port)
@@ -1033,6 +1103,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
int frame_width, frame_height;
dma_addr_t dma_addr;
u32 flags = 0;
+ u32 offset = 0;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -1042,14 +1113,49 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
int plane = fmt->coplanar ? p_data->vb_part : 0;
vpdma_fmt = fmt->vpdma_fmt[plane];
-
- dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /*
+ * If we are using a single plane buffer, the chroma data follows
+ * the luma data in the same buffer, so compute the offset of the
+ * chroma data for the separate vpdma chroma channel.
+ */
+ if (q_data->nplanes == 1 && plane) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ /* Compute required offset */
+ offset = q_data->bytesperline[0] * q_data->height;
+ } else {
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+ /* Use address as is, no offset */
+ offset = 0;
+ }
if (!dma_addr) {
vpe_err(ctx->dev,
- "acquiring input buffer(%d) dma_addr failed\n",
+ "acquiring output buffer(%d) dma_addr failed\n",
port);
return;
}
+ /* Apply the offset */
+ dma_addr += offset;
+
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
+ /*
+ * Use top or bottom field from same vb alternately
+ * f,f-1,f-2 = TBT when seq is even
+ * f,f-1,f-2 = BTB when seq is odd
+ */
+ field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+
+ if (field) {
+ /*
+ * Bottom field of a SEQ_TB buffer:
+ * skip the top field data by advancing the
+ * dma address by the size of one field.
+ */
+ int height = q_data->height / 2;
+ int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
+ 1 : (vpdma_fmt->depth >> 3);
+ if (plane)
+ height /= 2;
+ dma_addr += q_data->width * height * bpp;
+ }
+ }
}
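A worked example, with an assumed frame size, of the bottom-field offset computed above for a V4L2_FIELD_SEQ_TB buffer:

    /* NV12, 1920x1080 SEQ_TB buffer, so each field is 1920x540:
     *   luma (plane 0):   bpp = 1, height = 540 -> offset = 1920 * 540 = 1036800
     *   chroma (plane 1): height halved again to 270 -> offset = 1920 * 270 = 518400
     * The offset is added to dma_addr whenever the bottom field of the
     * buffer is being read. */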
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1077,7 +1183,7 @@ static void enable_irqs(struct vpe_ctx *ctx)
write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
VPE_DS1_UV_ERROR_INT);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
}
static void disable_irqs(struct vpe_ctx *ctx)
@@ -1085,7 +1191,7 @@ static void disable_irqs(struct vpe_ctx *ctx)
write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
- vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+ vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
}
/* device_run() - prepares and starts the device
@@ -1098,23 +1204,49 @@ static void device_run(void *priv)
struct vpe_ctx *ctx = priv;
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
- if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
- ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[2] == NULL);
- ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[1] == NULL);
+ if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
+ ctx->sequence % 2 == 0) {
+ /* When using SEQ_TB buffers for the first (top) field, there is
+ * no need to remove the buffer, as the next field is present in
+ * the same buffer (this also keeps job_ready from failing).
+ * It will be removed when the bottom field is used.
+ */
+ ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
+ } else {
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ WARN_ON(ctx->src_vbs[0] == NULL);
}
- ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- WARN_ON(ctx->src_vbs[0] == NULL);
ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->dst_vb == NULL);
+ if (ctx->deinterlacing) {
+
+ if (ctx->src_vbs[2] == NULL) {
+ ctx->src_vbs[2] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[2] == NULL);
+ ctx->src_vbs[1] = ctx->src_vbs[0];
+ WARN_ON(ctx->src_vbs[1] == NULL);
+ }
+
+ /*
+ * we have output the first 2 frames through line averaging;
+ * now switch to the EDI de-interlacer
+ */
+ if (ctx->sequence == 2)
+ config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
+ }
+
/* config descriptors */
if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+
+ set_line_modes(ctx);
+
ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
ctx->load_mmrs = false;
}
@@ -1202,7 +1334,7 @@ static void device_run(void *priv)
enable_irqs(ctx);
vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
- vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+ vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
}
static void dei_error(struct vpe_ctx *ctx)
@@ -1225,6 +1357,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
struct vb2_v4l2_buffer *s_vb, *d_vb;
unsigned long flags;
u32 irqst0, irqst1;
+ bool list_complete = false;
irqst0 = read_reg(dev, VPE_INT0_STATUS0);
if (irqst0) {
@@ -1257,17 +1390,24 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
if (irqst0) {
if (irqst0 & VPE_INT0_LIST0_COMPLETE)
- vpdma_clear_list_stat(ctx->dev->vpdma);
+ vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+ list_complete = true;
}
if (irqst0 | irqst1) {
- dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
- "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+ dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
irqst0, irqst1);
}
+ /*
+ * Set up the next operation only when the list complete IRQ
+ * occurs; otherwise, skip the following code
+ */
+ if (!list_complete)
+ goto handled;
+
disable_irqs(ctx);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
@@ -1295,7 +1435,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
- if (d_q_data->flags & Q_DATA_INTERLACED) {
+ if (d_q_data->flags & Q_IS_INTERLACED) {
d_vb->field = ctx->field;
if (ctx->field == V4L2_FIELD_BOTTOM) {
ctx->sequence++;
@@ -1309,12 +1449,28 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->sequence++;
}
- if (ctx->deinterlacing)
- s_vb = ctx->src_vbs[2];
+ if (ctx->deinterlacing) {
+ /*
+ * Allow the source buffer to be dequeued only if it won't be used
+ * in the next iteration. All vbs are initialized to the first
+ * buffer and we shift buffers every iteration, so for the
+ * first two iterations no buffer will be dequeued.
+ * This ensures that the driver keeps the (n-2)th, (n-1)th and (n)th
+ * fields when deinterlacing is enabled.
+ */
+ if (ctx->src_vbs[2] != ctx->src_vbs[1])
+ s_vb = ctx->src_vbs[2];
+ else
+ s_vb = NULL;
+ }
spin_lock_irqsave(&dev->lock, flags);
- v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
+ if (s_vb)
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+
spin_unlock_irqrestore(&dev->lock, flags);
if (ctx->deinterlacing) {
@@ -1322,8 +1478,16 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[1] = ctx->src_vbs[0];
}
+ /*
+ * Since vb2_buf_done has already been called for these
+ * buffers, we can now NULL them out so that we won't try
+ * to clean up stray pointers later on.
+ */
+ ctx->src_vbs[0] = NULL;
+ ctx->dst_vb = NULL;
+
ctx->bufs_completed++;
- if (ctx->bufs_completed < ctx->bufs_per_job) {
+ if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
goto handled;
}
@@ -1414,7 +1578,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix->colorspace = s_q_data->colorspace;
}
- pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
+ pix->num_planes = q_data->nplanes;
for (i = 0; i < pix->num_planes; i++) {
pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
@@ -1430,7 +1594,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *plane_fmt;
unsigned int w_align;
- int i, depth, depth_bytes;
+ int i, depth, depth_bytes, height;
if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1438,7 +1602,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
return -EINVAL;
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
+ && pix->field != V4L2_FIELD_SEQ_TB)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1450,28 +1615,53 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
*/
depth_bytes = depth >> 3;
- if (depth_bytes == 3)
+ if (depth_bytes == 3) {
/*
* if bpp is 3(as in some RGB formats), the pixel width doesn't
* really help in ensuring line stride is 16 byte aligned
*/
w_align = 4;
- else
+ } else {
/*
* for the remainder bpp(4, 2 and 1), the pixel width alignment
* can ensure a line stride alignment of 16 bytes. For example,
* if bpp is 2, then the line stride can be 16 byte aligned if
* the width is 8 byte aligned
*/
- w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+ /*
+ * HACK: using order_base_2() here causes lots of asm output
+ * errors with smatch, on i386:
+ * ./arch/x86/include/asm/bitops.h:457:22:
+ * warning: asm output is not an lvalue
+ * Perhaps some gcc optimization is doing the wrong thing
+ * there.
+ * Let's get rid of them by doing the calculation in two steps.
+ */
+ w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
+ w_align = ilog2(w_align);
+ }
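A worked example, not in the patch, of the two-step width-alignment calculation above, assuming VPDMA_DESC_ALIGN is 16 bytes:

    /* depth_bytes = 2 (e.g. YUYV): 16 / 2 = 8, roundup_pow_of_two(8) = 8,
     * ilog2(8) = 3, so the width is aligned to 2^3 = 8 pixels and the line
     * stride stays a multiple of 16 bytes. For depth_bytes = 4 the result
     * is w_align = 2, i.e. 4-pixel alignment. */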
v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- pix->num_planes = fmt->coplanar ? 2 : 1;
+ if (!pix->num_planes)
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ else if (pix->num_planes > 1 && !fmt->coplanar)
+ pix->num_planes = 1;
+
pix->pixelformat = fmt->fourcc;
+ /*
+ * For the actual image parameters, we need to consider the field
+ * height of the image for SEQ_TB buffers.
+ */
+ if (pix->field == V4L2_FIELD_SEQ_TB)
+ height = pix->height / 2;
+ else
+ height = pix->height;
+
if (!pix->colorspace) {
if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
@@ -1479,7 +1669,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
fmt->fourcc == V4L2_PIX_FMT_BGR32) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
- if (pix->height > 1280) /* HD */
+ if (height > 1280) /* HD */
pix->colorspace = V4L2_COLORSPACE_REC709;
else /* SD */
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -1496,6 +1686,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
else
plane_fmt->bytesperline = pix->width;
+ if (pix->num_planes == 1 && fmt->coplanar)
+ depth += fmt->vpdma_fmt[VPE_CHROMA]->depth;
plane_fmt->sizeimage =
(pix->height * pix->width * depth) >> 3;
@@ -1542,6 +1734,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
q_data->height = pix->height;
q_data->colorspace = pix->colorspace;
q_data->field = pix->field;
+ q_data->nplanes = pix->num_planes;
for (i = 0; i < pix->num_planes; i++) {
plane_fmt = &pix->plane_fmt[i];
@@ -1556,14 +1749,20 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
q_data->c_rect.height = q_data->height;
if (q_data->field == V4L2_FIELD_ALTERNATE)
- q_data->flags |= Q_DATA_INTERLACED;
+ q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
+ else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
else
- q_data->flags &= ~Q_DATA_INTERLACED;
+ q_data->flags &= ~Q_IS_INTERLACED;
+
+ /* the crop height is halved for the case of SEQ_TB buffers */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
q_data->bytesperline[VPE_LUMA]);
- if (q_data->fmt->coplanar)
+ if (q_data->nplanes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
q_data->bytesperline[VPE_CHROMA]);
@@ -1594,6 +1793,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
@@ -1628,13 +1828,22 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
return -EINVAL;
}
+ /*
+ * For SEQ_TB buffers, the crop height should not exceed the
+ * field height, not the buffer height
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ height = q_data->height / 2;
+ else
+ height = q_data->height;
+
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
- &s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
+ &s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
if (s->r.left + s->r.width > q_data->width)
@@ -1784,6 +1993,7 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -1804,14 +2014,14 @@ static int vpe_queue_setup(struct vb2_queue *vq,
q_data = get_q_data(ctx, vq->type);
- *nplanes = q_data->fmt->coplanar ? 2 : 1;
+ *nplanes = q_data->nplanes;
for (i = 0; i < *nplanes; i++)
sizes[i] = q_data->sizeimage[i];
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->fmt->coplanar)
+ if (q_data->nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -1827,14 +2037,15 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->fmt->coplanar ? 2 : 1;
+ num_planes = q_data->nplanes;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- if (!(q_data->flags & Q_DATA_INTERLACED)) {
+ if (!(q_data->flags & Q_IS_INTERLACED)) {
vbuf->field = V4L2_FIELD_NONE;
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
- vbuf->field != V4L2_FIELD_BOTTOM)
+ vbuf->field != V4L2_FIELD_BOTTOM &&
+ vbuf->field != V4L2_FIELD_SEQ_TB)
return -EINVAL;
}
}
@@ -1863,9 +2074,98 @@ static void vpe_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static int check_srcdst_sizes(struct vpe_ctx *ctx)
+{
+ struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ unsigned int src_w = s_q_data->c_rect.width;
+ unsigned int src_h = s_q_data->c_rect.height;
+ unsigned int dst_w = d_q_data->c_rect.width;
+ unsigned int dst_h = d_q_data->c_rect.height;
+
+ if (src_w == dst_w && src_h == dst_h)
+ return 0;
+
+ if (src_h <= SC_MAX_PIXEL_HEIGHT &&
+ src_w <= SC_MAX_PIXEL_WIDTH &&
+ dst_h <= SC_MAX_PIXEL_HEIGHT &&
+ dst_w <= SC_MAX_PIXEL_WIDTH)
+ return 0;
+
+ return -1;
+}
+
+static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vb)
+ break;
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+
+ /*
+ * Clean up the in-transit vb2 buffers that have already been
+ * removed from their respective queue but for
+ * which processing has not been completed yet.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ if (ctx->src_vbs[2])
+ v4l2_m2m_buf_done(ctx->src_vbs[2], state);
+
+ if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[1], state);
+
+ if (ctx->src_vbs[0] &&
+ (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
+ (ctx->src_vbs[0] != ctx->src_vbs[2]))
+ v4l2_m2m_buf_done(ctx->src_vbs[0], state);
+
+ ctx->src_vbs[2] = NULL;
+ ctx->src_vbs[1] = NULL;
+ ctx->src_vbs[0] = NULL;
+
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ } else {
+ if (ctx->dst_vb) {
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+
+ v4l2_m2m_buf_done(ctx->dst_vb, state);
+ ctx->dst_vb = NULL;
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+ }
+}
+
static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
- /* currently we do nothing here */
+ struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+
+ /* Check whether any of the sizes exceed the maximum scaling sizes */
+ if (check_srcdst_sizes(ctx)) {
+ vpe_err(ctx->dev,
+ "Conversion setup failed, check source and destination parameters\n"
+ );
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
+ return -EINVAL;
+ }
+
+ if (ctx->deinterlacing)
+ config_edi_input_mode(ctx, 0x0);
+
+ if (ctx->sequence != 0)
+ set_srcdst_params(ctx);
return 0;
}
@@ -1876,6 +2176,8 @@ static void vpe_stop_streaming(struct vb2_queue *q)
vpe_dump_regs(ctx->dev);
vpdma_dump_regs(ctx->dev->vpdma);
+
+ vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops vpe_qops = {
@@ -1995,6 +2297,7 @@ static int vpe_open(struct file *file)
s_q_data->fmt = &vpe_formats[2];
s_q_data->width = 1920;
s_q_data->height = 1080;
+ s_q_data->nplanes = 1;
s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
@@ -2068,11 +2371,13 @@ static int vpe_release(struct file *file)
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
- free_vbs(ctx);
free_mv_buffers(ctx);
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
+ vpdma_free_desc_buf(&ctx->sc_coeff_v);
+ vpdma_free_desc_buf(&ctx->sc_coeff_h);
+
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
@@ -2235,23 +2540,22 @@ static int vpe_probe(struct platform_device *pdev)
vpe_top_vpdma_reset(dev);
- dev->sc = sc_create(pdev);
+ dev->sc = sc_create(pdev, "sc");
if (IS_ERR(dev->sc)) {
ret = PTR_ERR(dev->sc);
goto runtime_put;
}
- dev->csc = csc_create(pdev);
+ dev->csc = csc_create(pdev, "csc");
if (IS_ERR(dev->csc)) {
ret = PTR_ERR(dev->csc);
goto runtime_put;
}
- dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
- if (IS_ERR(dev->vpdma)) {
- ret = PTR_ERR(dev->vpdma);
+ dev->vpdma = &dev->vpdma_data;
+ ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
+ if (ret)
goto runtime_put;
- }
return 0;
@@ -2290,6 +2594,7 @@ static const struct of_device_id vpe_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, vpe_of_match);
#endif
static struct platform_driver vpe_pdrv = {
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 7ca12deba89c..e16f70a5df1d 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -39,15 +39,12 @@ MODULE_LICENSE("GPL");
static bool flip_image;
module_param(flip_image, bool, 0444);
MODULE_PARM_DESC(flip_image,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
+ "If set, the sensor will be instructed to flip the image vertically.");
static bool override_serial;
module_param(override_serial, bool, 0444);
MODULE_PARM_DESC(override_serial,
- "The camera driver will normally refuse to load if "
- "the XO 1.5 serial port is enabled. Set this option "
- "to force-enable the camera.");
+ "The camera driver will normally refuse to load if the XO 1.5 serial port is enabled. Set this option to force-enable the camera.");
/*
* The structure describing our camera.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 8e6918c5c87c..db0dd19d227a 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -25,7 +25,7 @@ config VIDEO_VIVID
config VIDEO_VIVID_CEC
bool "Enable CEC emulation support"
- depends on VIDEO_VIVID && MEDIA_CEC
+ depends on VIDEO_VIVID && MEDIA_CEC_SUPPORT
---help---
When selected the vivid module will emulate the optional
HDMI CEC feature.
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index f9f878b8e0a7..cb4933592a3c 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -216,7 +216,6 @@ static const struct cec_adap_ops vivid_cec_adap_ops = {
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
- struct device *parent,
bool is_source)
{
char name[sizeof(dev->vid_out_dev.name) + 2];
@@ -227,5 +226,5 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
- name, caps, 1, parent);
+ name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/platform/vivid/vivid-cec.h
index 97892afa6b3b..3926b1422777 100644
--- a/drivers/media/platform/vivid/vivid-cec.h
+++ b/drivers/media/platform/vivid/vivid-cec.h
@@ -20,7 +20,6 @@
#ifdef CONFIG_VIDEO_VIVID_CEC
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
- struct device *parent,
bool is_source);
void vivid_cec_bus_free_work(struct vivid_dev *dev);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5464fefbaab9..51e37812ec98 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -183,7 +183,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
- 0x0c, 0x00, 0x10, 0x00, 0x00, 0x78, 0x21, 0x00,
+ 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
@@ -194,7 +194,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
};
static int vidioc_querycap(struct file *file, void *priv,
@@ -1167,12 +1167,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (in_type_counter[HDMI]) {
struct cec_adapter *adap;
- adap = vivid_cec_alloc_adap(dev, 0, &pdev->dev, false);
+ adap = vivid_cec_alloc_adap(dev, 0, false);
ret = PTR_ERR_OR_ZERO(adap);
if (ret < 0)
goto unreg_dev;
dev->cec_rx_adap = adap;
- ret = cec_register_adapter(adap);
+ ret = cec_register_adapter(adap, &pdev->dev);
if (ret < 0) {
cec_delete_adapter(adap);
dev->cec_rx_adap = NULL;
@@ -1222,13 +1222,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->output_type[i] != HDMI)
continue;
dev->cec_output2bus_map[i] = bus_cnt;
- adap = vivid_cec_alloc_adap(dev, bus_cnt,
- &pdev->dev, true);
+ adap = vivid_cec_alloc_adap(dev, bus_cnt, true);
ret = PTR_ERR_OR_ZERO(adap);
if (ret < 0)
goto unreg_dev;
dev->cec_tx_adap[bus_cnt] = adap;
- ret = cec_register_adapter(adap);
+ ret = cec_register_adapter(adap, &pdev->dev);
if (ret < 0) {
cec_delete_adapter(adap);
dev->cec_tx_adap[bus_cnt] = NULL;
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index a7daa40d0a49..5cdf95bdc4d1 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -80,7 +80,7 @@ extern unsigned vivid_debug;
struct vivid_fmt {
u32 fourcc; /* v4l2 format id */
- bool is_yuv;
+ enum tgp_color_enc color_enc;
bool can_do_overlay;
u8 vdownsampling[TPG_MAX_PLANES];
u32 alpha_mask;
@@ -346,6 +346,7 @@ struct vivid_dev {
struct v4l2_dv_timings dv_timings_out;
u32 colorspace_out;
u32 ycbcr_enc_out;
+ u32 hsv_enc_out;
u32 quantization_out;
u32 xfer_func_out;
u32 service_set_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index aceb38d9f7e7..34731f71cc00 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -79,6 +79,7 @@
#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 40)
#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41)
#define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42)
+#define VIVID_CID_HSV_ENC (VIVID_CID_VIVID_BASE + 43)
#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
@@ -378,6 +379,14 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
vivid_send_source_change(dev, HDMI);
vivid_send_source_change(dev, WEBCAM);
break;
+ case VIVID_CID_HSV_ENC:
+ tpg_s_hsv_enc(&dev->tpg, ctrl->val ? V4L2_HSV_ENC_256 :
+ V4L2_HSV_ENC_180);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
case VIVID_CID_QUANTIZATION:
tpg_s_quantization(&dev->tpg, ctrl->val);
vivid_send_source_change(dev, TV);
@@ -778,6 +787,21 @@ static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
.qmenu = vivid_ctrl_ycbcr_enc_strings,
};
+static const char * const vivid_ctrl_hsv_enc_strings[] = {
+ "Hue 0-179",
+ "Hue 0-256",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hsv_enc = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HSV_ENC,
+ .name = "HSV Encoding",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(vivid_ctrl_hsv_enc_strings) - 2,
+ .qmenu = vivid_ctrl_hsv_enc_strings,
+};
+
static const char * const vivid_ctrl_quantization_strings[] = {
"Default",
"Full Range",
@@ -1454,6 +1478,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
&vivid_ctrl_colorspace, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_xfer_func, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hsv_enc, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index d5c84ecf2027..c52dd8787794 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -510,6 +510,13 @@ static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
return dev->ycbcr_enc_out;
}
+static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_hsv_enc(&dev->tpg);
+ return dev->hsv_enc_out;
+}
+
static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
@@ -530,7 +537,10 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
mp->pixelformat = dev->fmt_cap->fourcc;
mp->colorspace = vivid_colorspace_cap(dev);
mp->xfer_func = vivid_xfer_func_cap(dev);
- mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
mp->quantization = vivid_quantization_cap(dev);
mp->num_planes = dev->fmt_cap->buffers;
for (p = 0; p < mp->num_planes; p++) {
@@ -618,7 +628,10 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
mp->colorspace = vivid_colorspace_cap(dev);
- mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ if (fmt->color_enc == TGP_COLOR_ENC_HSV)
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ else
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
mp->xfer_func = vivid_xfer_func_cap(dev);
mp->quantization = vivid_quantization_cap(dev);
memset(mp->reserved, 0, sizeof(mp->reserved));
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index fcda3ae4e6b0..5fc010f6ce67 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -48,7 +48,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
.data_offset = { PLANE0_DATA_OFFSET },
@@ -57,7 +57,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_UYVY,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -65,7 +65,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVYU,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -73,7 +73,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_VYUY,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 1,
.buffers = 1,
},
@@ -81,7 +81,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV422P,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -89,7 +89,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV420,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -97,7 +97,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU420,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 1,
},
@@ -105,7 +105,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV12,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -113,7 +113,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV21,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -121,7 +121,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV16,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -129,7 +129,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV61,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -137,7 +137,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV24,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -145,7 +145,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV42,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 1,
},
@@ -184,7 +184,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_GREY,
.vdownsampling = { 1 },
.bit_depth = { 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -192,7 +192,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_Y16,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -200,7 +200,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_Y16_BE,
.vdownsampling = { 1 },
.bit_depth = { 16 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_LUMA,
.planes = 1,
.buffers = 1,
},
@@ -445,6 +445,22 @@ struct vivid_fmt vivid_formats[] = {
.planes = 1,
.buffers = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_HSV24, /* HSV 24bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 24 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HSV32, /* HSV 32bits */
+ .color_enc = TGP_COLOR_ENC_HSV,
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
/* Multiplanar formats */
@@ -452,7 +468,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV16M,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
.data_offset = { PLANE0_DATA_OFFSET, 0 },
@@ -461,7 +477,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV61M,
.vdownsampling = { 1, 1 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
.data_offset = { 0, PLANE0_DATA_OFFSET },
@@ -470,7 +486,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV420M,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -478,7 +494,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU420M,
.vdownsampling = { 1, 2, 2 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -486,7 +502,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV12M,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
},
@@ -494,7 +510,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_NV21M,
.vdownsampling = { 1, 2 },
.bit_depth = { 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 2,
.buffers = 2,
},
@@ -502,7 +518,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV422M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -510,7 +526,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU422M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 4, 4 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -518,7 +534,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YUV444M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -526,7 +542,7 @@ struct vivid_fmt vivid_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU444M,
.vdownsampling = { 1, 1, 1 },
.bit_depth = { 8, 8, 8 },
- .is_yuv = true,
+ .color_enc = TGP_COLOR_ENC_YCBCR,
.planes = 3,
.buffers = 3,
},
@@ -616,6 +632,7 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
mp->field = pix->field;
mp->colorspace = pix->colorspace;
mp->xfer_func = pix->xfer_func;
+ /* Also copies hsv_enc */
mp->ycbcr_enc = pix->ycbcr_enc;
mp->quantization = pix->quantization;
mp->num_planes = 1;
@@ -645,6 +662,7 @@ int fmt_sp2mp_func(struct file *file, void *priv,
pix->field = mp->field;
pix->colorspace = mp->colorspace;
pix->xfer_func = mp->xfer_func;
+ /* Also copies hsv_enc */
pix->ycbcr_enc = mp->ycbcr_enc;
pix->quantization = mp->quantization;
pix->sizeimage = ppix->sizeimage;
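
The "Also copies hsv_enc" comments above rely on ycbcr_enc and hsv_enc sharing storage in the V4L2 pixel format structures, so assigning one copies the other. A rough sketch of the assumed uapi layout (editorial, not a verbatim copy of include/uapi/linux/videodev2.h):

struct v4l2_pix_format {
	/* ... width, height, pixelformat, field, bytesperline, sizeimage ... */
	__u32 colorspace;
	__u32 priv;
	__u32 flags;
	union {
		/* aliased fields: writing ycbcr_enc also sets hsv_enc */
		__u32 ycbcr_enc;
		__u32 hsv_enc;
	};
	__u32 quantization;
	__u32 xfer_func;
};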
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index dd609eea4753..7ba52ee98371 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -256,6 +256,7 @@ void vivid_update_format_out(struct vivid_dev *dev)
}
dev->xfer_func_out = V4L2_XFER_FUNC_DEFAULT;
dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
+ dev->hsv_enc_out = V4L2_HSV_ENC_180;
dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
dev->compose_out = dev->sink_rect;
dev->compose_bounds_out = dev->sink_rect;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 57c713a4e1df..aa237b48ad55 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -770,6 +770,7 @@ static const struct of_device_id vsp1_of_match[] = {
{ .compatible = "renesas,vsp2" },
{ },
};
+MODULE_DEVICE_TABLE(of, vsp1_of_match);
static struct platform_driver vsp1_platform_driver = {
.probe = vsp1_probe,
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 756ca4ea7668..280ba0804699 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -78,6 +78,14 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 66e4d7ea31d6..04104ef28fb5 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -37,6 +37,7 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
@@ -78,6 +79,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d351b9c768d2..41e8b096dab8 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -124,6 +124,11 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
pix->pixelformat = info->fourcc;
pix->colorspace = V4L2_COLORSPACE_SRGB;
pix->field = V4L2_FIELD_NONE;
+
+ if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
+ info->fourcc == V4L2_PIX_FMT_HSV32)
+ pix->hsv_enc = V4L2_HSV_ENC_256;
+
memset(pix->reserved, 0, sizeof(pix->reserved));
/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index cff1eb144a5c..ca051ccbc3e4 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -67,14 +67,10 @@ module_param(probe, bool, 0444);
MODULE_PARM_DESC(probe, "Enable automatic device probing.");
module_param(hardmute, bool, 0644);
-MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may "
- "reduce static noise.");
+MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may reduce static noise.");
module_param_array(io, int, NULL, 0444);
-MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic "
- "probing is disabled or fails. The most common I/O ports are: 0x20c "
- "0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to "
- "work for the combined sound/radiocard).");
+MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic probing is disabled or fails. The most common I/O ports are: 0x20c, 0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to work for the combined sound/radiocard).");
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_nr, "Radio device numbers");
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index a93f681aa9d6..9ce4b12299b4 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2068,8 +2068,7 @@ static int wl1273_fm_radio_probe(struct platform_device *pdev)
goto err_request_irq;
}
} else {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ"
- " not configured");
+ dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ not configured");
r = -EINVAL;
goto pdata_err;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index ee0470a3196b..9b81969d76b5 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -387,8 +387,8 @@ static int si470x_i2c_probe(struct i2c_client *client,
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&client->dev,
- "This driver is known to work with "
- "firmware version %hu,\n", RADIO_FW_VERSION);
+ "This driver is known to work with firmware version %hu,\n",
+ RADIO_FW_VERSION);
dev_warn(&client->dev,
"but the device has firmware version %hu.\n",
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -400,8 +400,7 @@ static int si470x_i2c_probe(struct i2c_client *client,
dev_warn(&client->dev,
"If you have some trouble using this driver,\n");
dev_warn(&client->dev,
- "please report to V4L ML at "
- "linux-media@vger.kernel.org\n");
+ "please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set initial frequency */
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 4b132c29f290..1add136d37a3 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -351,8 +351,8 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
if (retval < 0)
- dev_warn(&radio->intf->dev, "si470x_get_scratch: "
- "si470x_get_report returned %d\n", retval);
+ dev_warn(&radio->intf->dev, "si470x_get_scratch: si470x_get_report returned %d\n",
+ retval);
else {
radio->software_version = radio->usb_buf[1];
radio->hardware_version = radio->usb_buf[2];
@@ -688,8 +688,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with "
- "firmware version %hu,\n", RADIO_FW_VERSION);
+ "This driver is known to work with firmware version %hu,\n",
+ RADIO_FW_VERSION);
dev_warn(&intf->dev,
"but the device has firmware version %hu.\n",
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -705,8 +705,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->software_version, radio->hardware_version);
if (radio->hardware_version < RADIO_HW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with "
- "hardware version %hu,\n", RADIO_HW_VERSION);
+ "This driver is known to work with hardware version %hu,\n",
+ RADIO_HW_VERSION);
dev_warn(&intf->dev,
"but the device has hardware version %hu.\n",
radio->hardware_version);
@@ -718,8 +718,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
dev_warn(&intf->dev,
"If you have some trouble using this driver,\n");
dev_warn(&intf->dev,
- "please report to V4L ML at "
- "linux-media@vger.kernel.org\n");
+ "please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set led to connect state */
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 0b04b56571da..bc2a8b5442ae 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -716,9 +716,9 @@ static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
*power = val[5];
*antcap = val[6];
*noise = val[7];
- v4l2_dbg(1, debug, &sdev->sd, "%s: response: %d x 10 kHz "
- "(power %d, antcap %d, rnl %d)\n", __func__,
- *frequency, *power, *antcap, *noise);
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: response: %d x 10 kHz (power %d, antcap %d, rnl %d)\n",
+ __func__, *frequency, *power, *antcap, *noise);
}
return err;
@@ -758,10 +758,9 @@ static int si4713_tx_rds_buff(struct si4713_device *sdev, u8 mode, u16 rdsb,
v4l2_dbg(1, debug, &sdev->sd,
"%s: status=0x%02x\n", __func__, val[0]);
*cbleft = (s8)val[2] - val[3];
- v4l2_dbg(1, debug, &sdev->sd, "%s: response: interrupts"
- " 0x%02x cb avail: %d cb used %d fifo avail"
- " %d fifo used %d\n", __func__, val[1],
- val[2], val[3], val[4], val[5]);
+ v4l2_dbg(1, debug, &sdev->sd,
+ "%s: response: interrupts 0x%02x cb avail: %d cb used %d fifo avail %d fifo used %d\n",
+ __func__, val[1], val[2], val[3], val[4], val[5]);
}
return err;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 642b89c66bcb..4be07656fbc0 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -212,14 +212,14 @@ inline void dump_tx_skb_data(struct sk_buff *skb)
len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
if (len_org > 0) {
- printk("\n data(%d): ", cmd_hdr->dlen);
+ printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
len = min(len_org, 14);
for (index = 0; index < len; index++)
- printk("%x ",
+ printk(KERN_CONT "%x ",
skb->data[FM_CMD_MSG_HDR_SIZE + index]);
- printk("%s", (len_org > 14) ? ".." : "");
+ printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
/* To dump incoming FM Channel-8 packets */
@@ -230,21 +230,21 @@ inline void dump_rx_skb_data(struct sk_buff *skb)
struct fm_event_msg_hdr *evt_hdr;
evt_hdr = (struct fm_event_msg_hdr *)skb->data;
- printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x "
- "opcode:%02x type:%s dlen:%02x", evt_hdr->hdr, evt_hdr->len,
- evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
- (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
+ printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
+ evt_hdr->hdr, evt_hdr->len,
+ evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
+ (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
if (len_org > 0) {
- printk("\n data(%d): ", evt_hdr->dlen);
+ printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
len = min(len_org, 14);
for (index = 0; index < len; index++)
- printk("%x ",
+ printk(KERN_CONT "%x ",
skb->data[FM_EVT_MSG_HDR_SIZE + index]);
- printk("%s", (len_org > 14) ? ".." : "");
+ printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
#endif
@@ -271,9 +271,9 @@ static void recv_tasklet(unsigned long arg)
/* Process all packets in the RX queue */
while ((skb = skb_dequeue(&fmdev->rx_q))) {
if (skb->len < sizeof(struct fm_event_msg_hdr)) {
- fmerr("skb(%p) has only %d bytes, "
- "at least need %zu bytes to decode\n", skb,
- skb->len, sizeof(struct fm_event_msg_hdr));
+ fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
+ skb,
+ skb->len, sizeof(struct fm_event_msg_hdr));
kfree_skb(skb);
continue;
}
@@ -472,8 +472,7 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
if (!wait_for_completion_timeout(&fmdev->maintask_comp,
FM_DRV_TX_TIMEOUT)) {
- fmerr("Timeout(%d sec),didn't get reg"
- "completion signal from RX tasklet\n",
+ fmerr("Timeout(%d sec), didn't get reg completion signal from RX tasklet\n",
jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
return -ETIMEDOUT;
}
@@ -523,8 +522,7 @@ static inline int check_cmdresp_status(struct fmdev *fmdev,
fm_evt_hdr = (void *)(*skb)->data;
if (fm_evt_hdr->status != 0) {
- fmerr("irq: opcode %x response status is not zero "
- "Initiating irq recovery process\n",
+ fmerr("irq: opcode %x response status is not zero Initiating irq recovery process\n",
fm_evt_hdr->op);
mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
@@ -564,8 +562,7 @@ static void int_timeout_handler(unsigned long data)
* reset stage index & retry count values */
fmirq->stage = 0;
fmirq->retry = 0;
- fmerr("Recovery action failed during"
- "irq processing, max retry reached\n");
+ fmerr("Recovery action failed during irq processing, max retry reached\n");
return;
}
fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
@@ -1516,14 +1513,13 @@ int fmc_prepare(struct fmdev *fmdev)
if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
FM_ST_REG_TIMEOUT)) {
- fmerr("Timeout(%d sec), didn't get reg "
- "completion signal from ST\n",
+ fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
return -ETIMEDOUT;
}
if (fmdev->streg_cbdata != 0) {
- fmerr("ST reg comp CB called with error "
- "status %d\n", fmdev->streg_cbdata);
+ fmerr("ST reg comp CB called with error status %d\n",
+ fmdev->streg_cbdata);
return -EAGAIN;
}
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index cfaeb2417fbb..e7455f82fadc 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -120,8 +120,8 @@ int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
if (curr_frq_in_khz != freq) {
- pr_info("Frequency is set to (%d) but "
- "requested freq is (%d)\n", curr_frq_in_khz, freq);
+ pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
+ curr_frq_in_khz, freq);
}
/* Update local cache */
@@ -390,8 +390,8 @@ int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
new_frq = fmdev->rx.region.top_freq;
if (new_frq) {
- fmdbg("Current freq is not within band limit boundary,"
- "switching to %d KHz\n", new_frq);
+ fmdbg("Current freq is not within band limit boundary, switching to %d KHz\n",
+ new_frq);
/* Current RX frequency is not in range. So, update it */
ret = fm_rx_set_freq(fmdev, new_frq);
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 370e16e07867..629e8ca15ab3 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -389,4 +389,21 @@ config IR_SUNXI
To compile this driver as a module, choose M here: the module will
be called sunxi-ir.
+config IR_SERIAL
+ tristate "Homebrew Serial Port Receiver"
+ depends on RC_CORE
+ ---help---
+ Say Y if you want to use Homebrew Serial Port Receivers and
+ Transceivers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called serial-ir.
+
+config IR_SERIAL_TRANSMITTER
+ bool "Serial Port Transmitter"
+ default y
+ depends on IR_SERIAL
+ ---help---
+ Serial Port Transmitter support
+
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 379a5c0f1379..3a984ee301e2 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
+obj-$(CONFIG_IR_SERIAL) += serial_ir.o
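
Taken together, the Kconfig and Makefile additions above let the receiver be built as a module; a minimal .config fragment for that might look as follows (RC_CORE is assumed to be enabled already, since IR_SERIAL depends on it):

CONFIG_RC_CORE=y
CONFIG_IR_SERIAL=m
CONFIG_IR_SERIAL_TRANSMITTER=y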
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 9f5b59706741..0884b7dc0e71 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -527,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
remote_num = (data[3] >> 4) & 0x0f;
if (channel_mask & (1 << (remote_num + 1))) {
dbginfo(&ati_remote->interface->dev,
- "Masked input from channel 0x%02x: data %02x, "
- "mask= 0x%02lx\n",
+ "Masked input from channel 0x%02x: data %02x, mask= 0x%02lx\n",
remote_num, data[2], channel_mask);
return;
}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d1c61cd035f6..bd5512e64aea 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1210,8 +1210,7 @@ MODULE_PARM_DESC(txsim,
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
- ("Infrared input driver for KB3926B/C/D/E/F "
- "(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
+ ("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index bd7b3bdb1a88..ecab69ea3d51 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -104,11 +104,7 @@ static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 of
/* read val from cir config register */
static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
{
- u8 val;
-
- val = inb(fintek->cir_addr + offset);
-
- return val;
+ return inb(fintek->cir_addr + offset);
}
/* dump current cir register contents */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 86cc70fe2534..0785a24af8fc 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -441,13 +441,11 @@ MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
static int display_type;
module_param(display_type, int, S_IRUGO);
-MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
- "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
static int pad_stabilize = 1;
module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
- "presses in arrow key mode. 0=disable, 1=enable (default).");
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default).");
/*
* In certain use cases, mouse mode isn't really helpful, and could actually
@@ -455,14 +453,12 @@ MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
*/
static bool nomouse;
module_param(nomouse, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
- "open. 0=don't disable, 1=disable. (default: don't disable)");
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)");
/* threshold at which a pad push registers as an arrow key in kbd mode */
static int pad_thresh;
module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
- "arrow key in kbd mode (default: 28)");
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)");
static void free_imon_context(struct imon_context *ictx)
@@ -611,7 +607,7 @@ static int send_packet(struct imon_context *ictx)
ictx->tx_urb->actual_length = 0;
}
- init_completion(&ictx->tx.finished);
+ reinit_completion(&ictx->tx.finished);
ictx->tx.busy = true;
smp_rmb(); /* ensure later readers know we're busy */
@@ -785,9 +781,7 @@ static ssize_t show_associate_remote(struct device *d,
else
strcpy(buf, "closed\n");
- dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
- "instructions on how to associate your iMON 2.4G DT/LT "
- "remote\n");
+ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
mutex_unlock(&ictx->lock);
return strlen(buf);
}
@@ -1115,8 +1109,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
if (*rc_type && !(*rc_type & rc->allowed_protocols))
- dev_warn(dev, "Looks like you're trying to use an IR protocol "
- "this device does not support\n");
+ dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n");
if (*rc_type & RC_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
@@ -1129,8 +1122,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
/* ir_proto_packet[0] = 0x00; // already the default */
*rc_type = RC_BIT_OTHER;
} else {
- dev_warn(dev, "Unsupported IR protocol specified, overriding "
- "to iMON IR protocol\n");
+ dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
@@ -1593,7 +1585,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
struct device *dev = ictx->dev;
unsigned long flags;
u32 kc;
- int i;
u64 scancode;
int press_type = 0;
int msec;
@@ -1664,10 +1655,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
}
if (debug) {
- printk(KERN_INFO "intf%d decoded packet: ", intf);
- for (i = 0; i < len; ++i)
- printk("%02x ", buf[i]);
- printk("\n");
+ printk(KERN_INFO "intf%d decoded packet: %*ph\n",
+ intf, len, buf);
}
press_type = imon_parse_press_type(ictx, buf, ktype);
@@ -1722,8 +1711,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
not_input_data:
if (len != 8) {
- dev_warn(dev, "imon %s: invalid incoming packet "
- "size (len = %d, intf%d)\n", __func__, len, intf);
+ dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
+ __func__, len, intf);
return;
}
@@ -1879,8 +1868,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
allowed_protos = RC_BIT_RC6_MCE;
break;
default:
- dev_info(ictx->dev, "Unknown 0xffdc device, "
- "defaulting to VFD and iMON IR");
+ dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/* We don't know which one it is, allow user to set the
* RC6 one from userspace if OTHER wasn't correct. */
@@ -1937,8 +1925,8 @@ static void imon_set_display_type(struct imon_context *ictx)
ictx->display_supported = false;
else
ictx->display_supported = true;
- dev_info(ictx->dev, "%s: overriding display type to %d via "
- "modparam\n", __func__, display_type);
+ dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n",
+ __func__, display_type);
}
ictx->display_type = configured_display_type;
@@ -2159,8 +2147,8 @@ static bool imon_find_endpoints(struct imon_context *ictx,
if (!display_ep_found) {
tx_control = true;
display_ep_found = true;
- dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
- "interface OUT endpoint\n", __func__);
+ dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n",
+ __func__);
}
/*
@@ -2228,6 +2216,8 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
ictx->tx_urb = tx_urb;
ictx->rf_device = false;
+ init_completion(&ictx->tx.finished);
+
ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
@@ -2369,8 +2359,8 @@ static void imon_init_display(struct imon_context *ictx,
/* set up sysfs entry for built-in clock */
ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
if (ret)
- dev_err(ictx->dev, "Could not create display sysfs "
- "entries(%d)", ret);
+ dev_err(ictx->dev, "Could not create display sysfs entries(%d)",
+ ret);
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
ret = usb_register_dev(intf, &imon_lcd_class);
@@ -2378,8 +2368,7 @@ static void imon_init_display(struct imon_context *ictx,
ret = usb_register_dev(intf, &imon_vfd_class);
if (ret)
/* Not a fatal error, so ignore */
- dev_info(ictx->dev, "could not get a minor number for "
- "display\n");
+ dev_info(ictx->dev, "could not get a minor number for display\n");
}
@@ -2459,8 +2448,8 @@ static int imon_probe(struct usb_interface *interface,
mutex_unlock(&ictx->lock);
}
- dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
- "usb<%d:%d> initialized\n", vendor, product, ifnum,
+ dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
+ vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
mutex_unlock(&driver_lock);
@@ -2504,7 +2493,7 @@ static void imon_disconnect(struct usb_interface *interface)
/* Abort ongoing write */
if (ictx->tx.busy) {
usb_kill_urb(ictx->tx_urb);
- complete_all(&ictx->tx.finished);
+ complete(&ictx->tx.finished);
}
if (ifnum == 0) {
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index d0549fba711c..d26907e684dc 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -75,15 +75,22 @@ static void hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
{
u32 val;
- regmap_read(dev->regmap, IR_CLK, &val);
- if (on) {
- val &= ~IR_CLK_RESET;
- val |= IR_CLK_ENABLE;
+ if (dev->regmap) {
+ regmap_read(dev->regmap, IR_CLK, &val);
+ if (on) {
+ val &= ~IR_CLK_RESET;
+ val |= IR_CLK_ENABLE;
+ } else {
+ val &= ~IR_CLK_ENABLE;
+ val |= IR_CLK_RESET;
+ }
+ regmap_write(dev->regmap, IR_CLK, val);
} else {
- val &= ~IR_CLK_ENABLE;
- val |= IR_CLK_RESET;
+ if (on)
+ clk_prepare_enable(dev->clock);
+ else
+ clk_disable_unprepare(dev->clock);
}
- regmap_write(dev->regmap, IR_CLK, val);
}
static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
@@ -207,8 +214,8 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
priv->regmap = syscon_regmap_lookup_by_phandle(node,
"hisilicon,power-syscon");
if (IS_ERR(priv->regmap)) {
- dev_err(dev, "no power-reg\n");
- return -EINVAL;
+ dev_info(dev, "no power-reg\n");
+ priv->regmap = NULL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 82fb6f2ca011..e6efa8c267a0 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -109,7 +109,7 @@ static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
now = timer->base->get_time();
- } while (hrtimer_get_expires_tv64(timer) < now.tv64);
+ } while (hrtimer_get_expires_tv64(timer) < now);
return HRTIMER_RESTART;
end:
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7331e5e7c497..b07d9caebeb1 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -56,7 +56,8 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
- u8 address, command, not_command;
+ u16 address;
+ u8 command, not_command;
if (!is_timing_event(ev)) {
if (ev.reset) {
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 0f301903aa6f..367b28bed627 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -55,14 +55,12 @@ MODULE_PARM_DESC(debug, "Enable debugging output");
/* low limit for RX carrier freq, Hz, 0 for no RX demodulation */
static int rx_low_carrier_freq;
module_param(rx_low_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, "
- "0 for no RX demodulation");
+MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, 0 for no RX demodulation");
/* high limit for RX carrier freq, Hz, 0 for no RX demodulation */
static int rx_high_carrier_freq;
module_param(rx_high_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, "
- "Hz, 0 for no RX demodulation");
+MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, Hz, 0 for no RX demodulation");
/* override tx carrier frequency */
static int tx_carrier_freq;
@@ -263,6 +261,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
if (allowance > ITE_RXDCR_MAX)
allowance = ITE_RXDCR_MAX;
+
+ use_demodulator = true;
}
}
@@ -1484,8 +1484,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (model_number >= 0 && model_number < ARRAY_SIZE(ite_dev_descs)) {
model_no = model_number;
- ite_pr(KERN_NOTICE, "The model has been fixed by a module "
- "parameter.");
+ ite_pr(KERN_NOTICE, "The model has been fixed by a module parameter.");
}
ite_pr(KERN_NOTICE, "Using model: %s\n", ite_dev_descs[model_no].model);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 91f9bb87ce68..3854809e8531 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -150,9 +150,6 @@ static const struct file_operations lirc_dev_fops = {
.write = lirc_dev_fop_write,
.poll = lirc_dev_fop_poll,
.unlocked_ioctl = lirc_dev_fop_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_dev_fop_ioctl,
-#endif
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = noop_llseek,
@@ -160,19 +157,19 @@ static const struct file_operations lirc_dev_fops = {
static int lirc_cdev_add(struct irctl *ir)
{
- int retval = -ENOMEM;
struct lirc_driver *d = &ir->d;
struct cdev *cdev;
+ int retval;
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ cdev = cdev_alloc();
if (!cdev)
- goto err_out;
+ return -ENOMEM;
if (d->fops) {
- cdev_init(cdev, d->fops);
+ cdev->ops = d->fops;
cdev->owner = d->owner;
} else {
- cdev_init(cdev, &lirc_dev_fops);
+ cdev->ops = &lirc_dev_fops;
cdev->owner = THIS_MODULE;
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
@@ -180,17 +177,15 @@ static int lirc_cdev_add(struct irctl *ir)
goto err_out;
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval) {
- kobject_put(&cdev->kobj);
+ if (retval)
goto err_out;
- }
ir->cdev = cdev;
return 0;
err_out:
- kfree(cdev);
+ cdev_del(cdev);
return retval;
}
@@ -420,7 +415,6 @@ int lirc_unregister_driver(int minor)
} else {
lirc_irctl_cleanup(ir);
cdev_del(cdev);
- kfree(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -521,7 +515,6 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
lirc_irctl_cleanup(ir);
cdev_del(cdev);
irctls[ir->d.minor] = NULL;
- kfree(cdev);
kfree(ir);
}
@@ -684,7 +677,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
* between while condition checking and scheduling)
*/
add_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
/*
* while we didn't provide 'length' bytes, device is opened in blocking
@@ -709,19 +701,19 @@ ssize_t lirc_dev_fop_read(struct file *file,
}
mutex_unlock(&ir->irctl_lock);
- schedule();
set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ set_current_state(TASK_RUNNING);
if (mutex_lock_interruptible(&ir->irctl_lock)) {
ret = -ERESTARTSYS;
remove_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_RUNNING);
goto out_unlocked;
}
if (!ir->attached) {
ret = -ENODEV;
- break;
+ goto out_locked;
}
} else {
lirc_buffer_read(ir->buf, buf);
@@ -735,7 +727,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
}
remove_wait_queue(&ir->buf->wait_poll, &wait);
- set_current_state(TASK_RUNNING);
out_locked:
mutex_unlock(&ir->irctl_lock);
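
The reordering above (marking the task TASK_INTERRUPTIBLE before calling schedule(), then restoring TASK_RUNNING afterwards) moves the read loop toward the canonical manual-wait idiom, where the sleeping state is set before the condition is re-checked so a wakeup arriving in between is not lost. A generic sketch of that idiom (editorial; wq and condition are placeholders, not driver code):

/* Generic manual-wait pattern, for reference only. */
DECLARE_WAITQUEUE(wait, current);

add_wait_queue(&wq, &wait);
for (;;) {
	set_current_state(TASK_INTERRUPTIBLE);
	if (condition)
		break;
	if (signal_pending(current))
		break;
	schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&wq, &wait);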
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7effdcee..9bf69179eee0 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -153,15 +153,6 @@
#define MCE_COMMAND_IRDATA 0x80
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
-/* general constants */
-#define SEND_FLAG_IN_PROGRESS 1
-#define SEND_FLAG_COMPLETE 2
-#define RECV_FLAG_IN_PROGRESS 3
-#define RECV_FLAG_COMPLETE 4
-
-#define MCEUSB_RX 1
-#define MCEUSB_TX 2
-
#define VENDOR_PHILIPS 0x0471
#define VENDOR_SMK 0x0609
#define VENDOR_TATUNG 0x1460
@@ -422,7 +413,6 @@ struct mceusb_dev {
struct rc_dev *rc;
/* optional features we can enable */
- bool carrier_report_enabled;
bool learning_enabled;
/* core device bits */
@@ -455,7 +445,6 @@ struct mceusb_dev {
} flags;
/* transmit support */
- int send_flags;
u32 carrier;
unsigned char tx_mask;
@@ -604,9 +593,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
break;
case MCE_RSP_EQWAKEVERSION:
if (!out)
- dev_dbg(dev, "Wake version, proto: 0x%02x, "
- "payload: 0x%02x, address: 0x%02x, "
- "version: 0x%02x",
+ dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x",
data1, data2, data3, data4);
break;
case MCE_RSP_GETPORTSTATUS:
@@ -740,52 +727,40 @@ static void mce_async_callback(struct urb *urb)
/* request incoming or send outgoing usb packet - used to initialize remote */
static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
- int size, int urb_type)
+ int size)
{
int res, pipe;
struct urb *async_urb;
struct device *dev = ir->dev;
unsigned char *async_buf;
- if (urb_type == MCEUSB_TX) {
- async_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (unlikely(!async_urb)) {
- dev_err(dev, "Error, couldn't allocate urb!\n");
- return;
- }
-
- async_buf = kzalloc(size, GFP_KERNEL);
- if (!async_buf) {
- dev_err(dev, "Error, couldn't allocate buf!\n");
- usb_free_urb(async_urb);
- return;
- }
-
- /* outbound data */
- if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
- pipe = usb_sndintpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
- size, mce_async_callback, ir,
- ir->usb_ep_out->bInterval);
- } else {
- pipe = usb_sndbulkpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
- async_buf, size, mce_async_callback,
- ir);
- }
- memcpy(async_buf, data, size);
+ async_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (unlikely(!async_urb)) {
+ dev_err(dev, "Error, couldn't allocate urb!\n");
+ return;
+ }
- } else if (urb_type == MCEUSB_RX) {
- /* standard request */
- async_urb = ir->urb_in;
- ir->send_flags = RECV_FLAG_IN_PROGRESS;
+ async_buf = kmalloc(size, GFP_KERNEL);
+ if (!async_buf) {
+ usb_free_urb(async_urb);
+ return;
+ }
+ /* outbound data */
+ if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
+ pipe = usb_sndintpipe(ir->usbdev,
+ ir->usb_ep_out->bEndpointAddress);
+ usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
+ size, mce_async_callback, ir,
+ ir->usb_ep_out->bInterval);
} else {
- dev_err(dev, "Error! Unknown urb type %d\n", urb_type);
- return;
+ pipe = usb_sndbulkpipe(ir->usbdev,
+ ir->usb_ep_out->bEndpointAddress);
+ usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
+ async_buf, size, mce_async_callback,
+ ir);
}
+ memcpy(async_buf, data, size);
dev_dbg(dev, "receive request called (size=%#x)", size);
@@ -806,19 +781,14 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
if (ir->need_reset) {
ir->need_reset = false;
- mce_request_packet(ir, DEVICE_RESUME, rsize, MCEUSB_TX);
+ mce_request_packet(ir, DEVICE_RESUME, rsize);
msleep(10);
}
- mce_request_packet(ir, data, size, MCEUSB_TX);
+ mce_request_packet(ir, data, size);
msleep(10);
}
-static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
-{
- mce_request_packet(ir, NULL, size, MCEUSB_RX);
-}
-
/* Send data out the IR blaster port(s) */
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
@@ -1062,7 +1032,6 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
static void mceusb_dev_recv(struct urb *urb)
{
struct mceusb_dev *ir;
- int buf_len;
if (!urb)
return;
@@ -1073,18 +1042,10 @@ static void mceusb_dev_recv(struct urb *urb)
return;
}
- buf_len = urb->actual_length;
-
- if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
- ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(ir->dev, "setup answer received %d bytes\n",
- buf_len);
- }
-
switch (urb->status) {
/* success */
case 0:
- mceusb_process_ir_data(ir, buf_len);
+ mceusb_process_ir_data(ir, urb->actual_length);
break;
case -ECONNRESET:
@@ -1285,7 +1246,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct mceusb_dev *ir = NULL;
- int pipe, maxp, i;
+ int pipe, maxp, i, res;
char buf[63], name[128] = "";
enum mceusb_model_type model = id->driver_info;
bool is_gen3;
@@ -1388,7 +1349,9 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* flush buffers on the device */
dev_dbg(&intf->dev, "Flushing receive buffers\n");
- mce_flush_rx_buffer(ir, maxp);
+ res = usb_submit_urb(ir->urb_in, GFP_KERNEL);
+ if (res)
+ dev_err(&intf->dev, "failed to flush buffers: %d\n", res);
/* figure out which firmware/emulator version this hardware has */
mceusb_get_emulator_version(ir);
@@ -1423,6 +1386,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* Error-handling path */
rc_dev_fail:
usb_put_dev(ir->usbdev);
+ usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
urb_in_alloc_fail:
usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 003fff07ade2..7eb3f4f1ddcd 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -218,6 +218,7 @@ static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
+MODULE_DEVICE_TABLE(of, meson_ir_match);
static struct platform_driver meson_ir_driver = {
.probe = meson_ir_probe,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 04fedaa75612..4b78c891eb77 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -48,6 +48,11 @@ static const struct nvt_chip nvt_chips[] = {
{ "NCT6779D", NVT_6779D },
};
+static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
+{
+ return nvt->rdev->dev.parent;
+}
+
static inline bool is_w83667hg(struct nvt_dev *nvt)
{
return nvt->chip_ver == NVT_W83667HG;
@@ -182,7 +187,7 @@ static ssize_t wakeup_data_show(struct device *dev,
ssize_t buf_len = 0;
int i;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
@@ -199,7 +204,7 @@ static ssize_t wakeup_data_show(struct device *dev,
}
buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
return buf_len;
}
@@ -243,7 +248,7 @@ static ssize_t wakeup_data_store(struct device *dev,
/* hardcode the tolerance to 10% */
tolerance = DIV_ROUND_UP(count, 10);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_clear_cir_wake_fifo(nvt);
nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -260,7 +265,7 @@ static ssize_t wakeup_data_store(struct device *dev,
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
ret = len;
out:
@@ -385,6 +390,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
+ struct device *dev = nvt_get_dev(nvt);
const char *chip_name;
int chip_id;
@@ -405,8 +411,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
chip_id = nvt->chip_major << 8 | nvt->chip_minor;
if (chip_id == NVT_INVALID) {
- dev_err(&nvt->pdev->dev,
- "No device found on either EFM port\n");
+ dev_err(dev, "No device found on either EFM port\n");
return -ENODEV;
}
@@ -414,12 +419,11 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
/* warn, but still let the driver load, if we don't know this chip */
if (!chip_name)
- dev_warn(&nvt->pdev->dev,
+ dev_warn(dev,
"unknown chip, id: 0x%02x 0x%02x, it may not work...",
nvt->chip_major, nvt->chip_minor);
else
- dev_info(&nvt->pdev->dev,
- "found %s or compatible: chip id: 0x%02x 0x%02x",
+ dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
chip_name, nvt->chip_major, nvt->chip_minor);
return 0;
@@ -586,7 +590,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_efm_disable(nvt);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
@@ -595,11 +599,11 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
}
#if 0 /* Currently unused */
-/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
+/* rx carrier detect only works in learning mode, must be called w/lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
u32 count, carrier, duration = 0;
@@ -616,7 +620,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
duration *= SAMPLE_PERIOD;
if (!count || !duration) {
- dev_notice(&nvt->pdev->dev,
+ dev_notice(nvt_get_dev(nvt),
"Unable to determine carrier! (c:%u, d:%u)",
count, duration);
return 0;
@@ -684,7 +688,7 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
u8 iren;
int ret;
- spin_lock_irqsave(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
nvt->tx.buf_count = (ret * sizeof(unsigned));
@@ -708,13 +712,13 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
for (i = 0; i < 9; i++)
nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
- spin_lock_irqsave(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt->tx.tx_state = ST_TX_NONE;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* restore enabled interrupts to prior state */
nvt_cir_reg_write(nvt, iren, CIR_IREN);
@@ -781,7 +785,7 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
- dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!");
+ dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");
nvt->pkts = 0;
nvt_clear_cir_fifo(nvt);
@@ -828,14 +832,7 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
- unsigned long flags;
- u8 tx_state;
-
- spin_lock_irqsave(&nvt->tx.lock, flags);
- tx_state = nvt->tx.tx_state;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
- return tx_state == ST_TX_NONE;
+ return nvt->tx.tx_state == ST_TX_NONE;
}
/* interrupt service routine for incoming and outgoing CIR data */
@@ -843,11 +840,10 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
{
struct nvt_dev *nvt = data;
u8 status, iren;
- unsigned long flags;
nvt_dbg_verbose("%s firing", __func__);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock(&nvt->lock);
/*
* Get IR Status register contents. Write 1 to ack/clear
@@ -869,7 +865,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
* logical device is being disabled.
*/
if (status == 0xff && iren == 0xff) {
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock(&nvt->lock);
nvt_dbg_verbose("Spurious interrupt detected");
return IRQ_HANDLED;
}
@@ -878,7 +874,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
* status bit whether the related interrupt source is enabled
*/
if (!(status & iren)) {
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock(&nvt->lock);
nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
return IRQ_NONE;
}
@@ -898,8 +894,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
nvt_get_rx_ir_data(nvt);
}
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
if (status & CIR_IRSTS_TE)
nvt_clear_tx_fifo(nvt);
@@ -907,8 +901,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
unsigned int pos, count;
u8 tmp;
- spin_lock_irqsave(&nvt->tx.lock, flags);
-
pos = nvt->tx.cur_buf_num;
count = nvt->tx.buf_count;
@@ -921,20 +913,17 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
tmp = nvt_cir_reg_read(nvt, CIR_IREN);
nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
}
-
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
}
if (status & CIR_IRSTS_TFU) {
- spin_lock_irqsave(&nvt->tx.lock, flags);
if (nvt->tx.tx_state == ST_TX_REPLY) {
nvt->tx.tx_state = ST_TX_REQUEST;
wake_up(&nvt->tx.queue);
}
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
}
+ spin_unlock(&nvt->lock);
+
nvt_dbg_verbose("%s done", __func__);
return IRQ_HANDLED;
}
@@ -943,7 +932,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
{
unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
/* disable CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
@@ -958,7 +947,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
nvt_clear_cir_fifo(nvt);
nvt_clear_tx_fifo(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* disable the CIR logical device */
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -969,7 +958,7 @@ static int nvt_open(struct rc_dev *dev)
struct nvt_dev *nvt = dev->priv;
unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
/* set function enable flags */
nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
@@ -982,7 +971,7 @@ static int nvt_open(struct rc_dev *dev)
/* enable interrupts */
nvt_set_cir_iren(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* enable the CIR logical device */
nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -1002,40 +991,41 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct nvt_dev *nvt;
struct rc_dev *rdev;
- int ret = -ENOMEM;
+ int ret;
nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
if (!nvt)
- return ret;
+ return -ENOMEM;
/* input device for IR remote (and tx) */
- rdev = rc_allocate_device();
- if (!rdev)
- goto exit_free_dev_rdev;
+ nvt->rdev = devm_rc_allocate_device(&pdev->dev);
+ if (!nvt->rdev)
+ return -ENOMEM;
+ rdev = nvt->rdev;
- ret = -ENODEV;
/* activate pnp device */
- if (pnp_activate_dev(pdev) < 0) {
+ ret = pnp_activate_dev(pdev);
+ if (ret) {
dev_err(&pdev->dev, "Could not activate PNP device!\n");
- goto exit_free_dev_rdev;
+ return ret;
}
/* validate pnp resources */
if (!pnp_port_valid(pdev, 0) ||
pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
if (!pnp_irq_valid(pdev, 0)) {
dev_err(&pdev->dev, "PNP IRQ not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
if (!pnp_port_valid(pdev, 1) ||
pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
- goto exit_free_dev_rdev;
+ return -EINVAL;
}
nvt->cir_addr = pnp_port_start(pdev, 0);
@@ -1046,17 +1036,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
nvt->cr_efir = CR_EFIR;
nvt->cr_efdr = CR_EFDR;
- spin_lock_init(&nvt->nvt_lock);
- spin_lock_init(&nvt->tx.lock);
+ spin_lock_init(&nvt->lock);
pnp_set_drvdata(pdev, nvt);
- nvt->pdev = pdev;
init_waitqueue_head(&nvt->tx.queue);
ret = nvt_hw_detect(nvt);
if (ret)
- goto exit_free_dev_rdev;
+ return ret;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
@@ -1085,7 +1073,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
rdev->input_id.product = nvt->chip_major;
rdev->input_id.version = nvt->chip_minor;
- rdev->dev.parent = &pdev->dev;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->timeout = MS_TO_NS(100);
@@ -1097,29 +1084,27 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* tx bits */
rdev->tx_resolution = XYZ;
#endif
- nvt->rdev = rdev;
-
- ret = rc_register_device(rdev);
+ ret = devm_rc_register_device(&pdev->dev, rdev);
if (ret)
- goto exit_free_dev_rdev;
+ return ret;
- ret = -EBUSY;
/* now claim resources */
if (!devm_request_region(&pdev->dev, nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto exit_unregister_device;
+ return -EBUSY;
- if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
- IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt))
- goto exit_unregister_device;
+ ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
+ IRQF_SHARED, NVT_DRIVER_NAME, nvt);
+ if (ret)
+ return ret;
if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
- goto exit_unregister_device;
+ return -EBUSY;
ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
if (ret)
- goto exit_unregister_device;
+ return ret;
device_init_wakeup(&pdev->dev, true);
@@ -1130,14 +1115,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
}
return 0;
-
-exit_unregister_device:
- rc_unregister_device(rdev);
- rdev = NULL;
-exit_free_dev_rdev:
- rc_free_device(rdev);
-
- return ret;
}
static void nvt_remove(struct pnp_dev *pdev)
@@ -1150,8 +1127,6 @@ static void nvt_remove(struct pnp_dev *pdev)
/* enable CIR Wake (for IR power-on) */
nvt_enable_wake(nvt);
-
- rc_unregister_device(nvt->rdev);
}
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
@@ -1161,16 +1136,14 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
nvt_dbg("%s called", __func__);
- spin_lock_irqsave(&nvt->tx.lock, flags);
- nvt->tx.tx_state = ST_TX_NONE;
- spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->lock, flags);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
/* disable all CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ spin_unlock_irqrestore(&nvt->lock, flags);
/* disable cir logical dev */
nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index acf735fc7170..c41c5765e1d2 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -78,17 +78,15 @@ struct nvt_chip {
};
struct nvt_dev {
- struct pnp_dev *pdev;
struct rc_dev *rdev;
- spinlock_t nvt_lock;
+ spinlock_t lock;
/* for rx */
u8 buf[RX_BUF_LEN];
unsigned int pkts;
struct {
- spinlock_t lock;
u8 buf[TX_BUF_LEN];
unsigned int buf_count;
unsigned int cur_buf_num;
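
The nuvoton-cir hunks above fold the separate nvt_lock and tx.lock into one lock and move every allocation and resource onto devres, which is why all the exit_* labels and the manual rc_unregister_device()/rc_free_device() calls disappear. A minimal sketch of that managed-probe shape, assuming the devm_rc_* helpers added later in this series; example_probe and example_isr are hypothetical names, not code from this patch:

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int example_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
	struct rc_dev *rdev;
	int ret;

	rdev = devm_rc_allocate_device(&pdev->dev);	/* freed by devres */
	if (!rdev)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, pnp_irq(pdev, 0), example_isr,
			       IRQF_SHARED, "example", rdev);
	if (ret)
		return ret;		/* nothing to unwind by hand */

	return devm_rc_register_device(&pdev->dev, rdev);
}

Because every resource is tied to &pdev->dev, the remove callback (like nvt_remove() above) only has to do device-specific work such as re-enabling wake.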
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 205ecc602e34..1c42a9f2f290 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -26,8 +26,7 @@ static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
-static DEFINE_MUTEX(available_protocols_lock);
-static u64 available_protocols;
+static atomic64_t available_protocols = ATOMIC64_INIT(0);
static int ir_raw_event_thread(void *data)
{
@@ -234,11 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
u64
ir_raw_get_allowed_protocols(void)
{
- u64 protocols;
- mutex_lock(&available_protocols_lock);
- protocols = available_protocols;
- mutex_unlock(&available_protocols_lock);
- return protocols;
+ return atomic64_read(&available_protocols);
}
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
@@ -331,9 +326,7 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_register)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
- mutex_lock(&available_protocols_lock);
- available_protocols |= ir_raw_handler->protocols;
- mutex_unlock(&available_protocols_lock);
+ atomic64_or(ir_raw_handler->protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -352,9 +345,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_unregister)
ir_raw_handler->raw_unregister(raw->dev);
}
- mutex_lock(&available_protocols_lock);
- available_protocols &= ~protocols;
- mutex_unlock(&available_protocols_lock);
+ atomic64_andnot(protocols, &available_protocols);
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
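
The rc-ir-raw change replaces a mutex-protected u64 with an atomic64_t, so readers of the allowed-protocol mask no longer take a lock; the handler list itself is still serialized by ir_raw_handler_lock. The same lock-free bitmask pattern in isolation (illustrative only, names are made up):

static atomic64_t feature_mask = ATOMIC64_INIT(0);

static void features_add(u64 bits)
{
	atomic64_or(bits, &feature_mask);
}

static void features_remove(u64 bits)
{
	atomic64_andnot(bits, &feature_mask);
}

static u64 features_get(void)
{
	return atomic64_read(&feature_mask);
}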
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d9c1f2ff7119..dedaf38c5ff6 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -12,6 +12,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/rc-core.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
@@ -66,7 +68,7 @@ struct rc_map *rc_map_get(const char *name)
if (!map) {
int rc = request_module("%s", name);
if (rc < 0) {
- printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+ pr_err("Couldn't load IR keymap %s\n", name);
return NULL;
}
msleep(20); /* Give some time for IR to register */
@@ -75,7 +77,7 @@ struct rc_map *rc_map_get(const char *name)
}
#endif
if (!map) {
- printk(KERN_ERR "IR keymap %s not found\n", name);
+ pr_err("IR keymap %s not found\n", name);
return NULL;
}
@@ -159,6 +161,7 @@ static void ir_free_table(struct rc_map *rc_map)
{
rc_map->size = 0;
kfree(rc_map->name);
+ rc_map->name = NULL;
kfree(rc_map->scan);
rc_map->scan = NULL;
}
@@ -660,8 +663,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol,
dev->last_toggle = toggle;
dev->last_keycode = keycode;
- IR_dprintk(1, "%s: key down event, "
- "key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
+ IR_dprintk(1, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
dev->input_name, keycode, protocol, scancode);
input_report_key(dev->input_dev, keycode, 1);
@@ -1403,6 +1405,34 @@ void rc_free_device(struct rc_dev *dev)
}
EXPORT_SYMBOL_GPL(rc_free_device);
+static void devm_rc_alloc_release(struct device *dev, void *res)
+{
+ rc_free_device(*(struct rc_dev **)res);
+}
+
+struct rc_dev *devm_rc_allocate_device(struct device *dev)
+{
+ struct rc_dev **dr, *rc;
+
+ dr = devres_alloc(devm_rc_alloc_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ rc = rc_allocate_device();
+ if (!rc) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ rc->dev.parent = dev;
+ rc->managed_alloc = true;
+ *dr = rc;
+ devres_add(dev, dr);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
+
int rc_register_device(struct rc_dev *dev)
{
static bool raw_init = false; /* raw decoders loaded? */
@@ -1531,6 +1561,33 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(rc_register_device);
+static void devm_rc_release(struct device *dev, void *res)
+{
+ rc_unregister_device(*(struct rc_dev **)res);
+}
+
+int devm_rc_register_device(struct device *parent, struct rc_dev *dev)
+{
+ struct rc_dev **dr;
+ int ret;
+
+ dr = devres_alloc(devm_rc_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = rc_register_device(dev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = dev;
+ devres_add(parent, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_rc_register_device);
+
void rc_unregister_device(struct rc_dev *dev)
{
if (!dev)
@@ -1552,7 +1609,8 @@ void rc_unregister_device(struct rc_dev *dev)
ida_simple_remove(&rc_ida, dev->minor);
- rc_free_device(dev);
+ if (!dev->managed_alloc)
+ rc_free_device(dev);
}
EXPORT_SYMBOL_GPL(rc_unregister_device);
@@ -1565,7 +1623,7 @@ static int __init rc_core_init(void)
{
int rc = class_register(&rc_class);
if (rc) {
- printk(KERN_ERR "rc_core: unable to register rc class\n");
+ pr_err("rc_core: unable to register rc class\n");
return rc;
}
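
The devm_rc_allocate_device()/devm_rc_register_device() helpers above follow the standard devres recipe: stash a pointer in a devres_alloc()'d node whose release callback undoes the operation when the parent device is unbound. The same recipe reduced to a generic pair; foo_destroy and the devm_foo_* names are stand-ins, not kernel API:

static void devm_foo_release(struct device *dev, void *res)
{
	foo_destroy(*(struct foo **)res);	/* hypothetical destructor */
}

static int devm_foo_create(struct device *dev, struct foo *f)
{
	struct foo **dr;

	dr = devres_alloc(devm_foo_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	*dr = f;
	devres_add(dev, dr);		/* released on driver detach */
	return 0;
}

Note how rc_unregister_device() now checks managed_alloc, so for a devm-allocated rc_dev the devres release callback, not the unregister path, is what frees the structure.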
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 05ba47bc0b61..2784f5dae398 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -81,6 +81,8 @@
#define RR3_RC_DET_ENABLE 0xbb
/* Stop capture with the RC receiver */
#define RR3_RC_DET_DISABLE 0xbc
+/* Start capture with the wideband receiver */
+#define RR3_MODSIG_CAPTURE 0xb2
/* Return the status of RC detector capture */
#define RR3_RC_DET_STATUS 0xbd
/* Reset redrat */
@@ -105,11 +107,13 @@
#define RR3_CLK_PER_COUNT 12
/* (RR3_CLK / RR3_CLK_PER_COUNT) */
#define RR3_CLK_CONV_FACTOR 2000000
-/* USB bulk-in IR data endpoint address */
-#define RR3_BULK_IN_EP_ADDR 0x82
+/* USB bulk-in wideband IR data endpoint address */
+#define RR3_WIDE_IN_EP_ADDR 0x81
+/* USB bulk-in narrowband IR data endpoint address */
+#define RR3_NARROW_IN_EP_ADDR 0x82
/* Size of the fixed-length portion of the signal */
-#define RR3_DRIVER_MAXLENS 128
+#define RR3_DRIVER_MAXLENS 255
#define RR3_MAX_SIG_SIZE 512
#define RR3_TIME_UNIT 50
#define RR3_END_OF_SIGNAL 0x7f
@@ -207,15 +211,22 @@ struct redrat3_dev {
struct urb *flash_urb;
u8 flash_in_buf;
+ /* learning */
+ bool wideband;
+ struct usb_ctrlrequest learn_control;
+ struct urb *learn_urb;
+ u8 learn_buf;
+
/* save off the usb device pointer */
struct usb_device *udev;
/* the receive endpoint */
- struct usb_endpoint_descriptor *ep_in;
+ struct usb_endpoint_descriptor *ep_narrow;
/* the buffer to receive data */
void *bulk_in_buf;
/* urb used to read ir data */
- struct urb *read_urb;
+ struct urb *narrow_urb;
+ struct urb *wide_urb;
/* the send endpoint */
struct usb_endpoint_descriptor *ep_out;
@@ -236,23 +247,6 @@ struct redrat3_dev {
char phys[64];
};
-/*
- * redrat3_issue_async
- *
- * Issues an async read to the ir data in port..
- * sets the callback to be redrat3_handle_async
- */
-static void redrat3_issue_async(struct redrat3_dev *rr3)
-{
- int res;
-
- res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC);
- if (res)
- dev_dbg(rr3->dev,
- "%s: receive request FAILED! (res %d, len %d)\n",
- __func__, res, rr3->read_urb->transfer_buffer_length);
-}
-
static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
{
if (!rr3->transmitting && (code != 0x40))
@@ -265,8 +259,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
/* Codes 0x20 through 0x2f are IR Firmware Errors */
case 0x20:
- pr_cont("Initial signal pulse not long enough "
- "to measure carrier frequency\n");
+ pr_cont("Initial signal pulse not long enough to measure carrier frequency\n");
break;
case 0x21:
pr_cont("Not enough length values allocated for signal\n");
@@ -278,18 +271,15 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
pr_cont("Too many signal repeats\n");
break;
case 0x28:
- pr_cont("Insufficient memory available for IR signal "
- "data memory allocation\n");
+ pr_cont("Insufficient memory available for IR signal data memory allocation\n");
break;
case 0x29:
- pr_cont("Insufficient memory available "
- "for IrDa signal data memory allocation\n");
+ pr_cont("Insufficient memory available for IrDa signal data memory allocation\n");
break;
/* Codes 0x30 through 0x3f are USB Firmware Errors */
case 0x30:
- pr_cont("Insufficient memory available for bulk "
- "transfer structure\n");
+ pr_cont("Insufficient memory available for bulk transfer structure\n");
break;
/*
@@ -301,8 +291,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
pr_cont("Signal capture has been terminated\n");
break;
case 0x41:
- pr_cont("Attempt to set/get and unknown signal I/O "
- "algorithm parameter\n");
+ pr_cont("Attempt to set/get and unknown signal I/O algorithm parameter\n");
break;
case 0x42:
pr_cont("Signal capture already started\n");
@@ -368,15 +357,18 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
unsigned int i, sig_size, single_len, offset, val;
u32 mod_freq;
- if (!rr3) {
- pr_err("%s called with no context!\n", __func__);
- return;
- }
-
dev = rr3->dev;
mod_freq = redrat3_val_to_mod_freq(&rr3->irdata);
dev_dbg(dev, "Got mod_freq of %u\n", mod_freq);
+ if (mod_freq && rr3->wideband) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.carrier_report = 1;
+ ev.carrier = mod_freq;
+
+ ir_raw_event_store(rr3->rc, &ev);
+ }
/* process each rr3 encoded byte into an int */
sig_size = be16_to_cpu(rr3->irdata.sig_size);
@@ -459,19 +451,31 @@ static int redrat3_enable_detector(struct redrat3_dev *rr3)
return -EIO;
}
- redrat3_issue_async(rr3);
+ ret = usb_submit_urb(rr3->narrow_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(rr3->dev, "narrow band urb failed: %d", ret);
+ return ret;
+ }
+
+ ret = usb_submit_urb(rr3->wide_urb, GFP_KERNEL);
+ if (ret)
+ dev_err(rr3->dev, "wide band urb failed: %d", ret);
- return 0;
+ return ret;
}
static inline void redrat3_delete(struct redrat3_dev *rr3,
struct usb_device *udev)
{
- usb_kill_urb(rr3->read_urb);
+ usb_kill_urb(rr3->narrow_urb);
+ usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
- usb_free_urb(rr3->read_urb);
+ usb_kill_urb(rr3->learn_urb);
+ usb_free_urb(rr3->narrow_urb);
+ usb_free_urb(rr3->wide_urb);
usb_free_urb(rr3->flash_urb);
- usb_free_coherent(udev, le16_to_cpu(rr3->ep_in->wMaxPacketSize),
+ usb_free_urb(rr3->learn_urb);
+ usb_free_coherent(udev, le16_to_cpu(rr3->ep_narrow->wMaxPacketSize),
rr3->bulk_in_buf, rr3->dma_in);
kfree(rr3);
@@ -485,10 +489,8 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
- if (!tmp) {
- dev_warn(rr3->dev, "Memory allocation faillure\n");
+ if (!tmp)
return timeout;
- }
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
@@ -543,16 +545,14 @@ static void redrat3_reset(struct redrat3_dev *rr3)
struct device *dev = rr3->dev;
int rc, rxpipe, txpipe;
u8 *val;
- int len = sizeof(u8);
+ size_t const len = sizeof(*val);
rxpipe = usb_rcvctrlpipe(udev, 0);
txpipe = usb_sndctrlpipe(udev, 0);
val = kmalloc(len, GFP_KERNEL);
- if (!val) {
- dev_err(dev, "Memory allocation failure\n");
+ if (!val)
return;
- }
*val = 0x01;
rc = usb_control_msg(udev, rxpipe, RR3_RESET,
@@ -590,14 +590,12 @@ static void redrat3_reset(struct redrat3_dev *rr3)
static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
{
- int rc = 0;
+ int rc;
char *buffer;
- buffer = kzalloc(sizeof(char) * (RR3_FW_VERSION_LEN + 1), GFP_KERNEL);
- if (!buffer) {
- dev_err(rr3->dev, "Memory allocation failure\n");
+ buffer = kcalloc(RR3_FW_VERSION_LEN + 1, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return;
- }
rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
RR3_FW_VERSION,
@@ -704,25 +702,25 @@ out:
/* callback function from USB when async USB request has completed */
static void redrat3_handle_async(struct urb *urb)
{
- struct redrat3_dev *rr3;
+ struct redrat3_dev *rr3 = urb->context;
int ret;
- if (!urb)
- return;
-
- rr3 = urb->context;
- if (!rr3) {
- pr_err("%s called with invalid context!\n", __func__);
- usb_unlink_urb(urb);
- return;
- }
-
switch (urb->status) {
case 0:
ret = redrat3_get_ir_data(rr3, urb->actual_length);
+ if (!ret && rr3->wideband && !rr3->learn_urb->hcpriv) {
+ ret = usb_submit_urb(rr3->learn_urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(rr3->dev, "Failed to submit learning urb: %d",
+ ret);
+ }
+
if (!ret) {
/* no error, prepare to read more */
- redrat3_issue_async(rr3);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(rr3->dev, "Failed to resubmit urb: %d",
+ ret);
}
break;
@@ -785,11 +783,11 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
/* rr3 will disable rc detector on transmit */
rr3->transmitting = true;
- sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
- if (!sample_lens) {
- ret = -ENOMEM;
- goto out;
- }
+ sample_lens = kcalloc(RR3_DRIVER_MAXLENS,
+ sizeof(*sample_lens),
+ GFP_KERNEL);
+ if (!sample_lens)
+ return -ENOMEM;
irdata = kzalloc(sizeof(*irdata), GFP_KERNEL);
if (!irdata) {
@@ -857,8 +855,8 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
ret = count;
out:
- kfree(sample_lens);
kfree(irdata);
+ kfree(sample_lens);
rr3->transmitting = false;
/* rr3 re-enables rc detector because it was enabled before */
@@ -882,6 +880,42 @@ static void redrat3_brightness_set(struct led_classdev *led_dev, enum
}
}
+static int redrat3_wideband_receiver(struct rc_dev *rcdev, int enable)
+{
+ struct redrat3_dev *rr3 = rcdev->priv;
+ int ret = 0;
+
+ rr3->wideband = enable != 0;
+
+ if (enable) {
+ ret = usb_submit_urb(rr3->learn_urb, GFP_KERNEL);
+ if (ret)
+ dev_err(rr3->dev, "Failed to submit learning urb: %d",
+ ret);
+ }
+
+ return ret;
+}
+
+static void redrat3_learn_complete(struct urb *urb)
+{
+ struct redrat3_dev *rr3 = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ usb_unlink_urb(urb);
+ return;
+ case -EPIPE:
+ default:
+ dev_err(rr3->dev, "Error: learn urb status = %d", urb->status);
+ break;
+ }
+}
+
static void redrat3_led_complete(struct urb *urb)
{
struct redrat3_dev *rr3 = urb->context;
@@ -908,19 +942,16 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
struct rc_dev *rc;
- int ret = -ENODEV;
+ int ret;
u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
rc = rc_allocate_device();
- if (!rc) {
- dev_err(dev, "remote input dev allocation failed\n");
- goto out;
- }
+ if (!rc)
+ return NULL;
- snprintf(rr3->name, sizeof(rr3->name), "RedRat3%s "
- "Infrared Remote Transceiver (%04x:%04x)",
- prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "",
- le16_to_cpu(rr3->udev->descriptor.idVendor), prod);
+ snprintf(rr3->name, sizeof(rr3->name),
+ "RedRat3%s Infrared Remote Transceiver",
+ prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "");
usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys));
@@ -937,6 +968,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
+ rc->s_carrier_report = redrat3_wideband_receiver;
rc->driver_name = DRIVER_NAME;
rc->rx_resolution = US_TO_NS(2);
rc->map_name = RC_MAP_HAUPPAUGE;
@@ -962,7 +994,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
struct usb_host_interface *uhi;
struct redrat3_dev *rr3;
struct usb_endpoint_descriptor *ep;
- struct usb_endpoint_descriptor *ep_in = NULL;
+ struct usb_endpoint_descriptor *ep_narrow = NULL;
+ struct usb_endpoint_descriptor *ep_wide = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
u8 addr, attrs;
int pipe, i;
@@ -976,15 +1009,16 @@ static int redrat3_dev_probe(struct usb_interface *intf,
addr = ep->bEndpointAddress;
attrs = ep->bmAttributes;
- if ((ep_in == NULL) &&
- ((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
+ if (((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
dev_dbg(dev, "found bulk-in endpoint at 0x%02x\n",
ep->bEndpointAddress);
- /* data comes in on 0x82, 0x81 is for other data... */
- if (ep->bEndpointAddress == RR3_BULK_IN_EP_ADDR)
- ep_in = ep;
+ /* data comes in on 0x82, 0x81 is for learning */
+ if (ep->bEndpointAddress == RR3_NARROW_IN_EP_ADDR)
+ ep_narrow = ep;
+ if (ep->bEndpointAddress == RR3_WIDE_IN_EP_ADDR)
+ ep_wide = ep;
}
if ((ep_out == NULL) &&
@@ -997,68 +1031,76 @@ static int redrat3_dev_probe(struct usb_interface *intf,
}
}
- if (!ep_in || !ep_out) {
- dev_err(dev, "Couldn't find both in and out endpoints\n");
+ if (!ep_narrow || !ep_out || !ep_wide) {
+ dev_err(dev, "Couldn't find all endpoints\n");
retval = -ENODEV;
goto no_endpoints;
}
/* allocate memory for our device state and initialize it */
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
- if (rr3 == NULL) {
- dev_err(dev, "Memory allocation failure\n");
+ if (!rr3)
goto no_endpoints;
- }
rr3->dev = &intf->dev;
+ rr3->ep_narrow = ep_narrow;
+ rr3->ep_out = ep_out;
+ rr3->udev = udev;
/* set up bulk-in endpoint */
- rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rr3->read_urb)
- goto error;
+ rr3->narrow_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->narrow_urb)
+ goto redrat_free;
- rr3->ep_in = ep_in;
- rr3->bulk_in_buf = usb_alloc_coherent(udev,
- le16_to_cpu(ep_in->wMaxPacketSize), GFP_KERNEL, &rr3->dma_in);
- if (!rr3->bulk_in_buf) {
- dev_err(dev, "Read buffer allocation failure\n");
- goto error;
- }
-
- pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
- usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf,
- le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3);
- rr3->read_urb->transfer_dma = rr3->dma_in;
- rr3->read_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ rr3->wide_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->wide_urb)
+ goto redrat_free;
- rr3->ep_out = ep_out;
- rr3->udev = udev;
+ rr3->bulk_in_buf = usb_alloc_coherent(udev,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ GFP_KERNEL, &rr3->dma_in);
+ if (!rr3->bulk_in_buf)
+ goto redrat_free;
+
+ pipe = usb_rcvbulkpipe(udev, ep_narrow->bEndpointAddress);
+ usb_fill_bulk_urb(rr3->narrow_urb, udev, pipe, rr3->bulk_in_buf,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ redrat3_handle_async, rr3);
+ rr3->narrow_urb->transfer_dma = rr3->dma_in;
+ rr3->narrow_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ pipe = usb_rcvbulkpipe(udev, ep_wide->bEndpointAddress);
+ usb_fill_bulk_urb(rr3->wide_urb, udev, pipe, rr3->bulk_in_buf,
+ le16_to_cpu(ep_narrow->wMaxPacketSize),
+ redrat3_handle_async, rr3);
+ rr3->wide_urb->transfer_dma = rr3->dma_in;
+ rr3->wide_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
redrat3_reset(rr3);
redrat3_get_firmware_rev(rr3);
- /* might be all we need to do? */
- retval = redrat3_enable_detector(rr3);
- if (retval < 0)
- goto error;
-
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
- /* led control */
- rr3->led.name = "redrat3:red:feedback";
- rr3->led.default_trigger = "rc-feedback";
- rr3->led.brightness_set = redrat3_brightness_set;
- retval = led_classdev_register(&intf->dev, &rr3->led);
- if (retval)
- goto error;
-
atomic_set(&rr3->flash, 0);
rr3->flash_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rr3->flash_urb) {
- retval = -ENOMEM;
- goto led_free_error;
- }
+ if (!rr3->flash_urb)
+ goto redrat_free;
+
+ /* learn urb */
+ rr3->learn_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->learn_urb)
+ goto redrat_free;
+
+ /* setup packet is 'c0 b2 0000 0000 0001' */
+ rr3->learn_control.bRequestType = 0xc0;
+ rr3->learn_control.bRequest = RR3_MODSIG_CAPTURE;
+ rr3->learn_control.wLength = cpu_to_le16(1);
+
+ usb_fill_control_urb(rr3->learn_urb, udev, usb_rcvctrlpipe(udev, 0),
+ (unsigned char *)&rr3->learn_control,
+ &rr3->learn_buf, sizeof(rr3->learn_buf),
+ redrat3_learn_complete, rr3);
/* setup packet is 'c0 b9 0000 0000 0001' */
rr3->flash_control.bRequestType = 0xc0;
@@ -1070,25 +1112,36 @@ static int redrat3_dev_probe(struct usb_interface *intf,
&rr3->flash_in_buf, sizeof(rr3->flash_in_buf),
redrat3_led_complete, rr3);
+ /* led control */
+ rr3->led.name = "redrat3:red:feedback";
+ rr3->led.default_trigger = "rc-feedback";
+ rr3->led.brightness_set = redrat3_brightness_set;
+ retval = led_classdev_register(&intf->dev, &rr3->led);
+ if (retval)
+ goto redrat_free;
+
rr3->rc = redrat3_init_rc_dev(rr3);
if (!rr3->rc) {
retval = -ENOMEM;
- goto led_free_error;
+ goto led_free;
}
+ /* might be all we need to do? */
+ retval = redrat3_enable_detector(rr3);
+ if (retval < 0)
+ goto led_free;
+
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, rr3);
return 0;
-led_free_error:
+led_free:
led_classdev_unregister(&rr3->led);
-error:
+redrat_free:
redrat3_delete(rr3, rr3->udev);
no_endpoints:
- dev_err(dev, "%s: retval = %x", __func__, retval);
-
return retval;
}
@@ -1097,9 +1150,6 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
- if (!rr3)
- return;
-
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
led_classdev_unregister(&rr3->led);
@@ -1111,7 +1161,8 @@ static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
led_classdev_suspend(&rr3->led);
- usb_kill_urb(rr3->read_urb);
+ usb_kill_urb(rr3->narrow_urb);
+ usb_kill_urb(rr3->wide_urb);
usb_kill_urb(rr3->flash_urb);
return 0;
}
@@ -1120,7 +1171,9 @@ static int redrat3_dev_resume(struct usb_interface *intf)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
- if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC))
+ if (usb_submit_urb(rr3->narrow_urb, GFP_ATOMIC))
+ return -EIO;
+ if (usb_submit_urb(rr3->wide_urb, GFP_ATOMIC))
return -EIO;
led_classdev_resume(&rr3->led);
return 0;
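
With the wideband (learning) receiver active, redrat3 now forwards the measured carrier to rc-core as a carrier-report event instead of only decoding timings, and the new s_carrier_report hook toggles that mode. A condensed sketch of the reporting step, assuming the rc-core raw-event API used elsewhere in this patch (report_carrier is a hypothetical helper):

static void report_carrier(struct rc_dev *rc, u32 carrier_hz)
{
	DEFINE_IR_RAW_EVENT(ev);

	ev.carrier_report = 1;
	ev.carrier = carrier_hz;	/* e.g. 38000 for a 38 kHz remote */

	ir_raw_event_store(rc, &ev);
	ir_raw_event_handle(rc);	/* wake the decoder thread */
}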
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
new file mode 100644
index 000000000000..436bd58b5f05
--- /dev/null
+++ b/drivers/media/rc/serial_ir.c
@@ -0,0 +1,844 @@
+/*
+ * serial_ir.c
+ *
+ * serial_ir - Device driver that records pulse- and pause-lengths
+ * (space-lengths) between DDCD event on a serial port.
+ *
+ * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
+ * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
+ * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
+ * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
+ * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
+ * Copyright (C) 2016 Sean Young <sean@mess.org> (port to rc-core)
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <media/rc-core.h>
+
+struct serial_ir_hw {
+ int signal_pin;
+ int signal_pin_change;
+ u8 on;
+ u8 off;
+ unsigned set_send_carrier:1;
+ unsigned set_duty_cycle:1;
+ void (*send_pulse)(unsigned int length, ktime_t edge);
+ void (*send_space)(void);
+ spinlock_t lock;
+};
+
+#define IR_HOMEBREW 0
+#define IR_IRDEO 1
+#define IR_IRDEO_REMOTE 2
+#define IR_ANIMAX 3
+#define IR_IGOR 4
+
+/* module parameters */
+static int type;
+static int io;
+static int irq;
+static bool iommap;
+static int ioshift;
+static bool softcarrier = true;
+static bool share_irq;
+static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
+static bool txsense; /* 0 = active high, 1 = active low */
+
+/* forward declarations */
+static void send_pulse_irdeo(unsigned int length, ktime_t edge);
+static void send_space_irdeo(void);
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew(unsigned int length, ktime_t edge);
+static void send_space_homebrew(void);
+#endif
+
+static struct serial_ir_hw hardware[] = {
+ [IR_HOMEBREW] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_HOMEBREW].lock),
+ .signal_pin = UART_MSR_DCD,
+ .signal_pin_change = UART_MSR_DDCD,
+ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+ .off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+ .send_pulse = send_pulse_homebrew,
+ .send_space = send_space_homebrew,
+ .set_send_carrier = true,
+ .set_duty_cycle = true,
+#endif
+ },
+
+ [IR_IRDEO] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = UART_MCR_OUT2,
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .send_pulse = send_pulse_irdeo,
+ .send_space = send_space_irdeo,
+ .set_duty_cycle = true,
+ },
+
+ [IR_IRDEO_REMOTE] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO_REMOTE].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ .send_pulse = send_pulse_irdeo,
+ .send_space = send_space_irdeo,
+ .set_duty_cycle = true,
+ },
+
+ [IR_ANIMAX] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_ANIMAX].lock),
+ .signal_pin = UART_MSR_DCD,
+ .signal_pin_change = UART_MSR_DDCD,
+ .on = 0,
+ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+ },
+
+ [IR_IGOR] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IGOR].lock),
+ .signal_pin = UART_MSR_DSR,
+ .signal_pin_change = UART_MSR_DDSR,
+ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+ .off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+ .send_pulse = send_pulse_homebrew,
+ .send_space = send_space_homebrew,
+ .set_send_carrier = true,
+ .set_duty_cycle = true,
+#endif
+ },
+};
+
+#define RS_ISR_PASS_LIMIT 256
+
+struct serial_ir {
+ ktime_t lastkt;
+ struct rc_dev *rcdev;
+ struct platform_device *pdev;
+
+ unsigned int freq;
+ unsigned int duty_cycle;
+
+ unsigned int pulse_width, space_width;
+};
+
+static struct serial_ir serial_ir;
+
+/* fetch serial input packet (1 byte) from register offset */
+static u8 sinp(int offset)
+{
+ if (iommap)
+ /* the register is memory-mapped */
+ offset <<= ioshift;
+
+ return inb(io + offset);
+}
+
+/* write serial output packet (1 byte) of value to register offset */
+static void soutp(int offset, u8 value)
+{
+ if (iommap)
+ /* the register is memory-mapped */
+ offset <<= ioshift;
+
+ outb(value, io + offset);
+}
+
+static void on(void)
+{
+ if (txsense)
+ soutp(UART_MCR, hardware[type].off);
+ else
+ soutp(UART_MCR, hardware[type].on);
+}
+
+static void off(void)
+{
+ if (txsense)
+ soutp(UART_MCR, hardware[type].on);
+ else
+ soutp(UART_MCR, hardware[type].off);
+}
+
+static void init_timing_params(unsigned int new_duty_cycle,
+ unsigned int new_freq)
+{
+ serial_ir.duty_cycle = new_duty_cycle;
+ serial_ir.freq = new_freq;
+
+ serial_ir.pulse_width = DIV_ROUND_CLOSEST(
+ new_duty_cycle * NSEC_PER_SEC, new_freq * 100l);
+ serial_ir.space_width = DIV_ROUND_CLOSEST(
+ (100l - new_duty_cycle) * NSEC_PER_SEC, new_freq * 100l);
+}
+
+static void send_pulse_irdeo(unsigned int length, ktime_t target)
+{
+ long rawbits;
+ int i;
+ unsigned char output;
+ unsigned char chunk, shifted;
+
+ /* how many bits have to be sent ? */
+ rawbits = length * 1152 / 10000;
+ if (serial_ir.duty_cycle > 50)
+ chunk = 3;
+ else
+ chunk = 1;
+ for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
+ shifted = chunk << (i * 3);
+ shifted >>= 1;
+ output &= (~shifted);
+ i++;
+ if (i == 3) {
+ soutp(UART_TX, output);
+ while (!(sinp(UART_LSR) & UART_LSR_THRE))
+ ;
+ output = 0x7f;
+ i = 0;
+ }
+ }
+ if (i != 0) {
+ soutp(UART_TX, output);
+ while (!(sinp(UART_LSR) & UART_LSR_TEMT))
+ ;
+ }
+}
+
+static void send_space_irdeo(void)
+{
+}
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge)
+{
+ ktime_t now, target = ktime_add_us(edge, length);
+ /*
+ * delta should never exceed 4 seconds and on m68k
+ * ndelay(s64) does not compile; so use s32 rather than s64.
+ */
+ s32 delta;
+
+ for (;;) {
+ now = ktime_get();
+ if (ktime_compare(now, target) >= 0)
+ break;
+ on();
+ edge = ktime_add_ns(edge, serial_ir.pulse_width);
+ delta = ktime_to_ns(ktime_sub(edge, now));
+ if (delta > 0)
+ ndelay(delta);
+ now = ktime_get();
+ off();
+ if (ktime_compare(now, target) >= 0)
+ break;
+ edge = ktime_add_ns(edge, serial_ir.space_width);
+ delta = ktime_to_ns(ktime_sub(edge, now));
+ if (delta > 0)
+ ndelay(delta);
+ }
+}
+
+static void send_pulse_homebrew(unsigned int length, ktime_t edge)
+{
+ if (softcarrier)
+ send_pulse_homebrew_softcarrier(length, edge);
+ else
+ on();
+}
+
+static void send_space_homebrew(void)
+{
+ off();
+}
+#endif
+
+static void frbwrite(unsigned int l, bool is_pulse)
+{
+ /* simple noise filter */
+ static unsigned int ptr, pulse, space;
+ DEFINE_IR_RAW_EVENT(ev);
+
+ if (ptr > 0 && is_pulse) {
+ pulse += l;
+ if (pulse > 250000) {
+ ev.duration = space;
+ ev.pulse = false;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ev.duration = pulse;
+ ev.pulse = true;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ptr = 0;
+ pulse = 0;
+ }
+ return;
+ }
+ if (!is_pulse) {
+ if (ptr == 0) {
+ if (l > 20000000) {
+ space = l;
+ ptr++;
+ return;
+ }
+ } else {
+ if (l > 20000000) {
+ space += pulse;
+ if (space > IR_MAX_DURATION)
+ space = IR_MAX_DURATION;
+ space += l;
+ if (space > IR_MAX_DURATION)
+ space = IR_MAX_DURATION;
+ pulse = 0;
+ return;
+ }
+
+ ev.duration = space;
+ ev.pulse = false;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ev.duration = pulse;
+ ev.pulse = true;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+ ptr = 0;
+ pulse = 0;
+ }
+ }
+
+ ev.duration = l;
+ ev.pulse = is_pulse;
+ ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+}
+
+static irqreturn_t serial_ir_irq_handler(int i, void *blah)
+{
+ ktime_t kt;
+ int counter, dcd;
+ u8 status;
+ ktime_t delkt;
+ unsigned int data;
+ static int last_dcd = -1;
+
+ if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
+ /* not our interrupt */
+ return IRQ_NONE;
+ }
+
+ counter = 0;
+ do {
+ counter++;
+ status = sinp(UART_MSR);
+ if (counter > RS_ISR_PASS_LIMIT) {
+ dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
+ break;
+ }
+ if ((status & hardware[type].signal_pin_change) &&
+ sense != -1) {
+ /* get current time */
+ kt = ktime_get();
+
+ /*
+ * The driver needs to know if your receiver is
+ * active high or active low, or the space/pulse
+ * sense could be inverted.
+ */
+
+ /* calc time since last interrupt in nanoseconds */
+ dcd = (status & hardware[type].signal_pin) ? 1 : 0;
+
+ if (dcd == last_dcd) {
+ dev_err(&serial_ir.pdev->dev,
+ "ignoring spike: %d %d %lldns %lldns\n",
+ dcd, sense, ktime_to_ns(kt),
+ ktime_to_ns(serial_ir.lastkt));
+ continue;
+ }
+
+ delkt = ktime_sub(kt, serial_ir.lastkt);
+ if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
+ data = IR_MAX_DURATION; /* really long time */
+ if (!(dcd ^ sense)) {
+ /* sanity check */
+ dev_err(&serial_ir.pdev->dev,
+ "dcd unexpected: %d %d %lldns %lldns\n",
+ dcd, sense, ktime_to_ns(kt),
+ ktime_to_ns(serial_ir.lastkt));
+ /*
+ * detecting pulse while this
+ * MUST be a space!
+ */
+ sense = sense ? 0 : 1;
+ }
+ } else {
+ data = ktime_to_ns(delkt);
+ }
+ frbwrite(data, !(dcd ^ sense));
+ serial_ir.lastkt = kt;
+ last_dcd = dcd;
+ ir_raw_event_handle(serial_ir.rcdev);
+ }
+ } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
+ return IRQ_HANDLED;
+}
+
+static int hardware_init_port(void)
+{
+ u8 scratch, scratch2, scratch3;
+
+ /*
+ * This is a simple port existence test, borrowed from the autoconfig
+ * function in drivers/tty/serial/8250/8250_port.c
+ */
+ scratch = sinp(UART_IER);
+ soutp(UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ scratch2 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, 0x0f);
+#ifdef __i386__
+ outb(0x00, 0x080);
+#endif
+ scratch3 = sinp(UART_IER) & 0x0f;
+ soutp(UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0f) {
+ /* we fail, there's nothing here */
+ pr_err("port existence test failed, cannot continue\n");
+ return -ENODEV;
+ }
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* First of all, disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+ /* Clear registers. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ /* Set line for power source */
+ off();
+
+ /* Clear registers again to be sure. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ switch (type) {
+ case IR_IRDEO:
+ case IR_IRDEO_REMOTE:
+ /* setup port to 7N1 @ 115200 Baud */
+ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
+
+ /* Set DLAB 1. */
+ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
+ /* Set divisor to 1 => 115200 Baud */
+ soutp(UART_DLM, 0);
+ soutp(UART_DLL, 1);
+ /* Set DLAB 0 + 7N1 */
+ soutp(UART_LCR, UART_LCR_WLEN7);
+ /* THR interrupt already disabled at this point */
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int serial_ir_probe(struct platform_device *dev)
+{
+ int i, nlow, nhigh, result;
+
+ result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
+ share_irq ? IRQF_SHARED : 0,
+ KBUILD_MODNAME, &hardware);
+ if (result < 0) {
+ if (result == -EBUSY)
+ dev_err(&dev->dev, "IRQ %d busy\n", irq);
+ else if (result == -EINVAL)
+ dev_err(&dev->dev, "Bad irq number or handler\n");
+ return result;
+ }
+
+ /* Reserve io region. */
+ if ((iommap &&
+ (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
+ KBUILD_MODNAME) == NULL)) ||
+ (!iommap && (devm_request_region(&dev->dev, io, 8,
+ KBUILD_MODNAME) == NULL))) {
+ dev_err(&dev->dev, "port %04x already in use\n", io);
+ dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
+ dev_warn(&dev->dev,
+ "or compile the serial port driver as module and\n");
+ dev_warn(&dev->dev, "make sure this module is loaded first\n");
+ return -EBUSY;
+ }
+
+ result = hardware_init_port();
+ if (result < 0)
+ return result;
+
+ /* Initialize pulse/space widths */
+ init_timing_params(50, 38000);
+
+ /* If pin is high, then this must be an active low receiver. */
+ if (sense == -1) {
+ /* wait 1/2 sec for the power supply */
+ msleep(500);
+
+ /*
+ * probe 9 times every 0.04s, collect "votes" for
+ * active high/low
+ */
+ nlow = 0;
+ nhigh = 0;
+ for (i = 0; i < 9; i++) {
+ if (sinp(UART_MSR) & hardware[type].signal_pin)
+ nlow++;
+ else
+ nhigh++;
+ msleep(40);
+ }
+ sense = nlow >= nhigh ? 1 : 0;
+ dev_info(&dev->dev, "auto-detected active %s receiver\n",
+ sense ? "low" : "high");
+ } else
+ dev_info(&dev->dev, "Manually using active %s receiver\n",
+ sense ? "low" : "high");
+
+ dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
+ return 0;
+}
+
+static int serial_ir_open(struct rc_dev *rcdev)
+{
+ unsigned long flags;
+
+ /* initialize timestamp */
+ serial_ir.lastkt = ktime_get();
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+ return 0;
+}
+
+static void serial_ir_close(struct rc_dev *rcdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* First of all, disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+}
+
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count)
+{
+ unsigned long flags;
+ ktime_t edge;
+ s64 delta;
+ int i;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ if (type == IR_IRDEO) {
+ /* DTR, RTS down */
+ on();
+ }
+
+ edge = ktime_get();
+ for (i = 0; i < count; i++) {
+ if (i % 2)
+ hardware[type].send_space();
+ else
+ hardware[type].send_pulse(txbuf[i], edge);
+
+ edge = ktime_add_us(edge, txbuf[i]);
+ delta = ktime_us_delta(edge, ktime_get());
+ if (delta > 25) {
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+ usleep_range(delta - 25, delta + 25);
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ } else if (delta > 0) {
+ udelay(delta);
+ }
+ }
+ off();
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+ return count;
+}
+
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle)
+{
+ init_timing_params(cycle, serial_ir.freq);
+ return 0;
+}
+
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier)
+{
+ if (carrier > 500000 || carrier < 20000)
+ return -EINVAL;
+
+ init_timing_params(serial_ir.duty_cycle, carrier);
+ return 0;
+}
+
+static int serial_ir_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ /* Set DLAB 0. */
+ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+ /* Disable all interrupts */
+ soutp(UART_IER, sinp(UART_IER) &
+ (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+ /* Clear registers. */
+ sinp(UART_LSR);
+ sinp(UART_RX);
+ sinp(UART_IIR);
+ sinp(UART_MSR);
+
+ return 0;
+}
+
+static int serial_ir_resume(struct platform_device *dev)
+{
+ unsigned long flags;
+ int result;
+
+ result = hardware_init_port();
+ if (result < 0)
+ return result;
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ /* Enable Interrupt */
+ serial_ir.lastkt = ktime_get();
+ soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+ off();
+
+ spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+ return 0;
+}
+
+static struct platform_driver serial_ir_driver = {
+ .probe = serial_ir_probe,
+ .suspend = serial_ir_suspend,
+ .resume = serial_ir_resume,
+ .driver = {
+ .name = "serial_ir",
+ },
+};
+
+static int __init serial_ir_init(void)
+{
+ int result;
+
+ result = platform_driver_register(&serial_ir_driver);
+ if (result)
+ return result;
+
+ serial_ir.pdev = platform_device_alloc("serial_ir", 0);
+ if (!serial_ir.pdev) {
+ result = -ENOMEM;
+ goto exit_driver_unregister;
+ }
+
+ result = platform_device_add(serial_ir.pdev);
+ if (result)
+ goto exit_device_put;
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(serial_ir.pdev);
+exit_driver_unregister:
+ platform_driver_unregister(&serial_ir_driver);
+ return result;
+}
+
+static void serial_ir_exit(void)
+{
+ platform_device_unregister(serial_ir.pdev);
+ platform_driver_unregister(&serial_ir_driver);
+}
+
+static int __init serial_ir_init_module(void)
+{
+ struct rc_dev *rcdev;
+ int result;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ case IR_IRDEO:
+ case IR_IRDEO_REMOTE:
+ case IR_ANIMAX:
+ case IR_IGOR:
+ /* if nothing specified, use ttyS0/com1 and irq 4 */
+ io = io ? io : 0x3f8;
+ irq = irq ? irq : 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!softcarrier) {
+ switch (type) {
+ case IR_HOMEBREW:
+ case IR_IGOR:
+ hardware[type].set_send_carrier = false;
+ hardware[type].set_duty_cycle = false;
+ break;
+ }
+ }
+
+ /* make sure sense is either -1, 0, or 1 */
+ if (sense != -1)
+ sense = !!sense;
+
+ result = serial_ir_init();
+ if (result)
+ return result;
+
+ rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev);
+ if (!rcdev) {
+ result = -ENOMEM;
+ goto serial_cleanup;
+ }
+
+ if (hardware[type].send_pulse && hardware[type].send_space)
+ rcdev->tx_ir = serial_ir_tx;
+ if (hardware[type].set_send_carrier)
+ rcdev->s_tx_carrier = serial_ir_tx_carrier;
+ if (hardware[type].set_duty_cycle)
+ rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ rcdev->input_name = "Serial IR type home-brew";
+ break;
+ case IR_IRDEO:
+ rcdev->input_name = "Serial IR type IRdeo";
+ break;
+ case IR_IRDEO_REMOTE:
+ rcdev->input_name = "Serial IR type IRdeo remote";
+ break;
+ case IR_ANIMAX:
+ rcdev->input_name = "Serial IR type AnimaX";
+ break;
+ case IR_IGOR:
+ rcdev->input_name = "Serial IR type IgorPlug";
+ break;
+ }
+
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->open = serial_ir_open;
+ rcdev->close = serial_ir_close;
+ rcdev->dev.parent = &serial_ir.pdev->dev;
+ rcdev->driver_type = RC_DRIVER_IR_RAW;
+ rcdev->allowed_protocols = RC_BIT_ALL;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->rx_resolution = 250000;
+
+ serial_ir.rcdev = rcdev;
+
+ result = rc_register_device(rcdev);
+
+ if (!result)
+ return 0;
+serial_cleanup:
+ serial_ir_exit();
+ return result;
+}
+
+static void __exit serial_ir_exit_module(void)
+{
+ rc_unregister_device(serial_ir.rcdev);
+ serial_ir_exit();
+}
+
+module_init(serial_ir_init_module);
+module_exit(serial_ir_exit_module);
+
+MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
+MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, Christoph Bartelmus, Andrei Tanas");
+MODULE_LICENSE("GPL");
+
+module_param(type, int, 0444);
+MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo, 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug");
+
+module_param(io, int, 0444);
+MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
+
+/* some architectures (e.g. intel xscale) have memory mapped registers */
+module_param(iommap, bool, 0444);
+MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)");
+
+/*
+ * some architectures (e.g. intel xscale) align the 8bit serial registers
+ * on 32bit word boundaries.
+ * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
+ */
+module_param(ioshift, int, 0444);
+MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
+
+module_param(irq, int, 0444);
+MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
+
+module_param(share_irq, bool, 0444);
+MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
+
+module_param(sense, int, 0444);
+MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit (0 = active high, 1 = active low )");
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+module_param(txsense, bool, 0444);
+MODULE_PARM_DESC(txsense, "Sense of transmitter circuit (0 = active high, 1 = active low )");
+#endif
+
+module_param(softcarrier, bool, 0444);
+MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
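
For the home-brew transmitter, serial_ir generates the carrier in software: init_timing_params() converts duty cycle and carrier frequency into on/off widths in nanoseconds, and send_pulse_homebrew_softcarrier() toggles the MCR line at those intervals. Worked through with the driver defaults (50% duty cycle, 38 kHz carrier):

	pulse_width = DIV_ROUND_CLOSEST(50 * NSEC_PER_SEC, 38000 * 100)
	            = DIV_ROUND_CLOSEST(50000000000, 3800000) = 13158 ns
	space_width = DIV_ROUND_CLOSEST((100 - 50) * NSEC_PER_SEC, 38000 * 100) = 13158 ns

so the line flips roughly every 13.2 us, one full carrier period about every 26.3 us, which is approximately 38 kHz.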
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 4004260a7c69..53f9b0af358a 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -297,8 +297,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
goto out;
}
- snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared "
- "Receiver (%04x:%04x)",
+ snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared Receiver (%04x:%04x)",
le16_to_cpu(sz->usbdev->descriptor.idVendor),
le16_to_cpu(sz->usbdev->descriptor.idProduct));
usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys));
@@ -364,15 +363,15 @@ static int streamzap_probe(struct usb_interface *intf,
sz->endpoint = &(iface_host->endpoint[0].desc);
if (!usb_endpoint_dir_in(sz->endpoint)) {
- dev_err(&intf->dev, "%s: endpoint doesn't match input device "
- "02%02x\n", __func__, sz->endpoint->bEndpointAddress);
+ dev_err(&intf->dev, "%s: endpoint doesn't match input device 02%02x\n",
+ __func__, sz->endpoint->bEndpointAddress);
retval = -ENODEV;
goto free_sz;
}
if (!usb_endpoint_xfer_int(sz->endpoint)) {
- dev_err(&intf->dev, "%s: endpoint attributes don't match xfer "
- "02%02x\n", __func__, sz->endpoint->bmAttributes);
+ dev_err(&intf->dev, "%s: endpoint attributes don't match xfer 02%02x\n",
+ __func__, sz->endpoint->bmAttributes);
retval = -ENODEV;
goto free_sz;
}
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 95ae60e659a1..78491ed48d92 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -227,8 +227,7 @@ struct wbcir_data {
static enum wbcir_protocol protocol = IR_PROTOCOL_RC6;
module_param(protocol, uint, 0444);
-MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command "
- "(0 = RC5, 1 = NEC, 2 = RC6A, default)");
+MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command (0 = RC5, 1 = NEC, 2 = RC6A, default)");
static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
@@ -244,8 +243,7 @@ MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command");
static unsigned int wake_rc6mode = 6;
module_param(wake_rc6mode, uint, 0644);
-MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command "
- "(0 = 0, 6 = 6A, default)");
+MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command (0 = 0, 6 = 6A, default)");
@@ -660,7 +658,7 @@ wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count)
unsigned i;
unsigned long flags;
- buf = kmalloc(count * sizeof(*b), GFP_KERNEL);
+ buf = kmalloc_array(count, sizeof(*b), GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1050,8 +1048,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
goto exit_free_data;
}
- dev_dbg(&device->dev, "Found device "
- "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
+ dev_dbg(&device->dev, "Found device (w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
data->wbase, data->ebase, data->sbase, data->irq);
data->led.name = "cir::activity";
@@ -1188,7 +1185,7 @@ static const struct pnp_device_id wbcir_ids[] = {
MODULE_DEVICE_TABLE(pnp, wbcir_ids);
static struct pnp_driver wbcir_driver = {
- .name = WBCIR_NAME,
+ .name = DRVNAME,
.id_table = wbcir_ids,
.probe = wbcir_probe,
.remove = wbcir_remove,
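
The wbcir_tx() change above (like the kcalloc() conversion in redrat3 earlier) swaps an open-coded count * sizeof(...) allocation for the overflow-checked helpers. Generic sketch with a hypothetical function name:

static u32 *alloc_samples(unsigned int count)
{
	/* returns NULL instead of a short buffer if count * 4 would overflow */
	return kmalloc_array(count, sizeof(u32), GFP_KERNEL);
}

kcalloc(count, size, gfp) performs the same overflow check and additionally zero-initialises the buffer.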
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index d76f36233f43..330dcb2b2e44 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -453,17 +453,15 @@ static int gs_probe(struct spi_device *spi)
static int gs_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
- struct gs *gs = to_gs(sd);
v4l2_device_unregister_subdev(sd);
- kfree(gs);
+
return 0;
}
static struct spi_driver gs_driver = {
.driver = {
.name = "gs1662",
- .owner = THIS_MODULE,
},
.probe = gs_probe,
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 3932aa81e18c..00489a9df4e4 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -112,12 +112,10 @@ static int fc0011_readreg(struct fc0011_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0011_release(struct dvb_frontend *fe)
+static void fc0011_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int fc0011_init(struct dvb_frontend *fe)
@@ -262,8 +260,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW7M;
break;
default:
- dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. "
- "Using 6000 kHz.\n",
+ dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. Using 6000 kHz.\n",
bandwidth);
bandwidth = 6000;
/* fallthrough */
@@ -435,9 +432,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
if (err)
return err;
- dev_dbg(&priv->i2c->dev, "Tuned to "
- "fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X "
- "vcocal=%02X(%u) bw=%u\n",
+ dev_dbg(&priv->i2c->dev, "Tuned to fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X vcocal=%02X(%u) bw=%u\n",
(unsigned int)regs[FC11_REG_FA],
(unsigned int)regs[FC11_REG_FP],
(unsigned int)regs[FC11_REG_XINHI],
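
The fc0011 hunk above and the remaining tuner files below repeat one mechanical API change: the dvb_tuner_ops .release callback now returns void instead of int, since no caller used the return value. The resulting callback shape, as a generic sketch ("mytuner" is hypothetical):

static void mytuner_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
}

Tuners that keep shared state (mxl5007t, r820t, tda18271) still drop their instance refcount under their list mutex before clearing tuner_priv; only the trailing "return 0;" goes away.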
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index d74e92056810..30508f44e5f9 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -55,11 +55,10 @@ static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0012_release(struct dvb_frontend *fe)
+static void fc0012_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int fc0012_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 522690d97b42..f7cf0e9e7c99 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -52,11 +52,10 @@ static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val)
return 0;
}
-static int fc0013_release(struct dvb_frontend *fe)
+static void fc0013_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int fc0013_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 353b178becf6..c3f10925b0d4 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -370,15 +370,13 @@ static int max2165_init(struct dvb_frontend *fe)
return 0;
}
-static int max2165_release(struct dvb_frontend *fe)
+static void max2165_release(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
dprintk("%s()\n", __func__);
kfree(priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops max2165_tuner_ops = {
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index f1b764074661..aba580b4ac2c 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -80,14 +80,12 @@ static int mc44s803_readreg(struct mc44s803_priv *priv, u8 reg, u32 *val)
return 0;
}
-static int mc44s803_release(struct dvb_frontend *fe)
+static void mc44s803_release(struct dvb_frontend *fe)
{
struct mc44s803_priv *priv = fe->tuner_priv;
fe->tuner_priv = NULL;
kfree(priv);
-
- return 0;
}
static int mc44s803_init(struct dvb_frontend *fe)
@@ -349,8 +347,8 @@ struct dvb_frontend *mc44s803_attach(struct dvb_frontend *fe,
id = MC44S803_REG_MS(reg, MC44S803_ID);
if (id != 0x14) {
- mc_printk(KERN_ERR, "unsupported ID "
- "(%x should be 0x14)\n", id);
+ mc_printk(KERN_ERR, "unsupported ID (%x should be 0x14)\n",
+ id);
goto error;
}
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index b87b2549d58d..94077ea78dde 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -332,11 +332,10 @@ static int mt2060_sleep(struct dvb_frontend *fe)
return ret;
}
-static int mt2060_release(struct dvb_frontend *fe)
+static void mt2060_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2060_tuner_ops = {
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index dfec23743afe..8b39d8dc97a0 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2019,7 +2019,7 @@ static int mt2063_get_status(struct dvb_frontend *fe, u32 *tuner_status)
return 0;
}
-static int mt2063_release(struct dvb_frontend *fe)
+static void mt2063_release(struct dvb_frontend *fe)
{
struct mt2063_state *state = fe->tuner_priv;
@@ -2027,8 +2027,6 @@ static int mt2063_release(struct dvb_frontend *fe)
fe->tuner_priv = NULL;
kfree(state);
-
- return 0;
}
static int mt2063_set_analog_params(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 52da4671b0e0..129bf8e1aff8 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -49,12 +49,10 @@ struct microtune_priv {
u32 frequency;
};
-static int microtune_release(struct dvb_frontend *fe)
+static void microtune_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int microtune_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -487,13 +485,8 @@ static void mt2050_set_if_freq(struct dvb_frontend *fe,unsigned int freq, unsign
buf[5]=div2a;
if(num2!=0) buf[5]=buf[5]|0x40;
- if (debug > 1) {
- int i;
- tuner_dbg("bufs is: ");
- for(i=0;i<6;i++)
- printk("%x ",buf[i]);
- printk("\n");
- }
+ if (debug > 1)
+ tuner_dbg("bufs is: %*ph\n", 6, buf);
ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,6);
if (ret!=6)
@@ -619,15 +612,9 @@ struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
tuner_i2c_xfer_send(&priv->i2c_props,buf,1);
tuner_i2c_xfer_recv(&priv->i2c_props,buf,21);
- if (debug) {
- int i;
- tuner_dbg("MT20xx hexdump:");
- for(i=0;i<21;i++) {
- printk(" %02x",buf[i]);
- if(((i+1)%8)==0) printk(" ");
- }
- printk("\n");
- }
+ if (debug)
+ tuner_dbg("MT20xx hexdump: %*ph\n", 21, buf);
+
company_code = buf[0x11] << 8 | buf[0x12];
tuner_info("microtune: companycode=%04x part=%02x rev=%02x\n",
company_code,buf[0x13],buf[0x14]);
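
The mt20xx debug hunks replace hand-rolled hexdump loops with the printk %*ph extension, which prints up to 64 bytes as space-separated hex. Illustrative usage with a made-up buffer:

u8 id[4] = { 0xde, 0xad, 0xbe, 0xef };

pr_debug("id: %*ph\n", (int)sizeof(id), id);	/* formats as "id: de ad be ef" */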
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 6e2cdd2b6175..e7790e4afcfe 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -230,12 +230,11 @@ static int mt2131_init(struct dvb_frontend *fe)
return ret;
}
-static int mt2131_release(struct dvb_frontend *fe)
+static void mt2131_release(struct dvb_frontend *fe)
{
dprintk(1, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2131_tuner_ops = {
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index bca4d75e42d4..88edcc031e3c 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -296,11 +296,10 @@ static int mt2266_sleep(struct dvb_frontend *fe)
return 0;
}
-static int mt2266_release(struct dvb_frontend *fe)
+static void mt2266_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mt2266_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 92a3be4fde87..353744fee053 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4063,12 +4063,11 @@ static int mxl5005s_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mxl5005s_release(struct dvb_frontend *fe)
+static void mxl5005s_release(struct dvb_frontend *fe)
{
dprintk(1, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 42569c6811e6..b16dfa5e85fb 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -776,7 +776,7 @@ static int mxl5007t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mxl5007t_release(struct dvb_frontend *fe)
+static void mxl5007t_release(struct dvb_frontend *fe)
{
struct mxl5007t_state *state = fe->tuner_priv;
@@ -788,8 +788,6 @@ static int mxl5007t_release(struct dvb_frontend *fe)
mutex_unlock(&mxl5007t_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
/* ------------------------------------------------------------------------- */
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index ae8cbece6d2b..a2c6cd1c3923 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -377,11 +377,10 @@ static int qt1010_init(struct dvb_frontend *fe)
return qt1010_set_params(fe);
}
-static int qt1010_release(struct dvb_frontend *fe)
+static void qt1010_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int qt1010_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 08dca40356d2..ba80376a3b86 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -2286,7 +2286,7 @@ static int r820t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int r820t_release(struct dvb_frontend *fe)
+static void r820t_release(struct dvb_frontend *fe)
{
struct r820t_priv *priv = fe->tuner_priv;
@@ -2300,8 +2300,6 @@ static int r820t_release(struct dvb_frontend *fe)
mutex_unlock(&r820t_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops r820t_tuner_ops = {
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 9300e9361e3b..8357a3c08a70 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -265,11 +265,10 @@ static int tda18218_init(struct dvb_frontend *fe)
return ret;
}
-static int tda18218_release(struct dvb_frontend *fe)
+static void tda18218_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static const struct dvb_tuner_ops tda18218_tuner_ops = {
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index a26bb33102b8..7e81cd887c13 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -251,8 +251,8 @@ static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
}
if (ret != 1)
- tda_err("ERROR: idx = 0x%x, len = %d, "
- "i2c_transfer returned: %d\n", idx, max, ret);
+ tda_err("ERROR: idx = 0x%x, len = %d, i2c_transfer returned: %d\n",
+ idx, max, ret);
return (ret == 1 ? 0 : ret);
}
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 2d50e8b1dce1..b4e5fa2ff5e5 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -26,8 +26,7 @@
int tda18271_debug;
module_param_named(debug, tda18271_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level "
- "(info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
+MODULE_PARM_DESC(debug, "set debug level (info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
static int tda18271_cal_on_startup = -1;
module_param_named(cal, tda18271_cal_on_startup, int, 0644);
@@ -1049,7 +1048,7 @@ fail:
return ret;
}
-static int tda18271_release(struct dvb_frontend *fe)
+static void tda18271_release(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
@@ -1061,8 +1060,6 @@ static int tda18271_release(struct dvb_frontend *fe)
mutex_unlock(&tda18271_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tda18271_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index 1e89dd93c4bb..7d114677b4ca 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1024,11 +1024,7 @@ int tda18271_lookup_rf_band(struct dvb_frontend *fe, u32 *freq, u8 *rf_band)
while ((map[i].rfmax * 1000) < *freq) {
if (tda18271_debug & DBG_ADV)
- tda_map("(%d) rfmax = %d < freq = %d, "
- "rf1_def = %d, rf2_def = %d, rf3_def = %d, "
- "rf1 = %d, rf2 = %d, rf3 = %d, "
- "rf_a1 = %d, rf_a2 = %d, "
- "rf_b1 = %d, rf_b2 = %d\n",
+ tda_map("(%d) rfmax = %d < freq = %d, rf1_def = %d, rf2_def = %d, rf3_def = %d, rf1 = %d, rf2 = %d, rf3 = %d, rf_a1 = %d, rf_a2 = %d, rf_b1 = %d, rf_b2 = %d\n",
i, map[i].rfmax * 1000, *freq,
map[i].rf1_def, map[i].rf2_def, map[i].rf3_def,
map[i].rf1, map[i].rf2, map[i].rf3,
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 5050ce9be423..2137eadf30f1 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -767,11 +767,10 @@ static void tda827xa_agcf(struct dvb_frontend *fe)
/* ------------------------------------------------------------------ */
-static int tda827x_release(struct dvb_frontend *fe)
+static void tda827x_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda8290.c b/drivers/media/tuners/tda8290.c
index 998e82bba9c0..a59c567c55d6 100644
--- a/drivers/media/tuners/tda8290.c
+++ b/drivers/media/tuners/tda8290.c
@@ -617,8 +617,8 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
if (tuner_addrs == 0) {
tuner_addrs = 0x60;
- tuner_info("could not clearly identify tuner address, "
- "defaulting to %x\n", tuner_addrs);
+ tuner_info("could not clearly identify tuner address, defaulting to %x\n",
+ tuner_addrs);
} else {
tuner_addrs = tuner_addrs & 0xff;
tuner_info("setting tuner address to %x\n", tuner_addrs);
@@ -721,7 +721,7 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
return -ENODEV;
}
-static struct analog_demod_ops tda8290_ops = {
+static const struct analog_demod_ops tda8290_ops = {
.set_params = tda8290_set_params,
.has_signal = tda8290_has_signal,
.standby = tda8290_standby,
@@ -729,7 +729,7 @@ static struct analog_demod_ops tda8290_ops = {
.i2c_gate_ctrl = tda8290_i2c_bridge,
};
-static struct analog_demod_ops tda8295_ops = {
+static const struct analog_demod_ops tda8295_ops = {
.set_params = tda8295_set_params,
.has_signal = tda8295_has_signal,
.standby = tda8295_standby,
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 56be6c29399b..c0e815f8b951 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -659,7 +659,7 @@ static void tda9887_release(struct dvb_frontend *fe)
fe->analog_demod_priv = NULL;
}
-static struct analog_demod_ops tda9887_ops = {
+static const struct analog_demod_ops tda9887_ops = {
.info = {
.name = "tda9887",
},
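The analog_demod_ops tables here (and the dvb_frontend_ops tables later in this diff) are only ever read, so they can be marked const and end up in read-only data. A simplified, self-contained illustration with made-up names:

struct demo_ops {
        int  (*init)(void *priv);
        void (*release)(void *priv);
};

static int  demo_ops_init(void *priv)    { return 0; }
static void demo_ops_release(void *priv) { }

/* The table itself is const: no code path may patch its callbacks at
 * run time, and every consumer takes a "const struct demo_ops *". */
static const struct demo_ops demo_frontend_ops = {
        .init    = demo_ops_init,
        .release = demo_ops_release,
};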
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index 36b0b1e1d05b..a9b1bb134409 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -274,24 +274,20 @@ int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
}
if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x061)) {
- printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x."
- " It is not a TEA5761\n",
+ printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x. It is not a TEA5761\n",
buffer[13], buffer[14], buffer[15]);
return -EINVAL;
}
- printk(KERN_WARNING "tea5761: TEA%02x%02x detected. "
- "Manufacturer ID= 0x%02x\n",
+ printk(KERN_WARNING "tea5761: TEA%02x%02x detected. Manufacturer ID= 0x%02x\n",
buffer[14], buffer[15], buffer[13]);
return 0;
}
-static int tea5761_release(struct dvb_frontend *fe)
+static void tea5761_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index d62a6d6b1f42..525b7ab90c80 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -401,12 +401,10 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
return 0;
}
-static int tea5767_release(struct dvb_frontend *fe)
+static void tea5767_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
-
- return 0;
}
static int tea5767_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 9ba9582e7765..3339b13dd3f5 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -275,8 +275,7 @@ static int simple_config_lookup(struct dvb_frontend *fe,
*config = t_params->ranges[i].config;
*cb = t_params->ranges[i].cb;
- tuner_dbg("freq = %d.%02d (%d), range = %d, "
- "config = 0x%02x, cb = 0x%02x\n",
+ tuner_dbg("freq = %d.%02d (%d), range = %d, config = 0x%02x, cb = 0x%02x\n",
*frequency / 16, *frequency % 16 * 100 / 16, *frequency,
i, *config, *cb);
@@ -404,12 +403,12 @@ static int simple_std_setup(struct dvb_frontend *fe,
i2c.addr = 0x0a;
rc = tuner_i2c_xfer_send(&i2c, &buffer[0], 2);
if (2 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 2)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+ rc);
rc = tuner_i2c_xfer_send(&i2c, &buffer[2], 2);
if (2 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 2)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+ rc);
break;
}
}
@@ -463,8 +462,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
rc = tuner_i2c_xfer_recv(&priv->i2c_props,
&status_byte, 1);
if (1 != rc) {
- tuner_warn("i2c i/o read error: rc == %d "
- "(should be 1)\n", rc);
+ tuner_warn("i2c i/o read error: rc == %d (should be 1)\n",
+ rc);
break;
}
if (status_byte & TUNER_PLL_LOCKED)
@@ -483,8 +482,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4);
if (4 != rc)
- tuner_warn("i2c i/o error: rc == %d "
- "(should be 4)\n", rc);
+ tuner_warn("i2c i/o error: rc == %d (should be 4)\n",
+ rc);
break;
}
}
@@ -499,8 +498,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
switch (priv->type) {
case TUNER_TENA_9533_DI:
case TUNER_YMEC_TVF_5533MF:
- tuner_dbg("This tuner doesn't have FM. "
- "Most cards have a TEA5767 for FM\n");
+ tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
return 0;
case TUNER_PHILIPS_FM1216ME_MK3:
case TUNER_PHILIPS_FM1236_MK3:
@@ -586,8 +584,7 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
div = params->frequency + IFPCoff + offset;
- tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, "
- "Offset=%d.%02d MHz, div=%0d\n",
+ tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
params->frequency / 16, params->frequency % 16 * 100 / 16,
IFPCoff / 16, IFPCoff % 16 * 100 / 16,
offset / 16, offset % 16 * 100 / 16, div);
@@ -858,8 +855,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
if (!tun->stepsize) {
/* tuner-core was loaded before the digital tuner was
* configured and somehow picked the wrong tuner type */
- tuner_err("attempt to treat tuner %d (%s) as digital tuner "
- "without stepsize defined.\n",
+ tuner_err("attempt to treat tuner %d (%s) as digital tuner without stepsize defined.\n",
priv->type, priv->tun->name);
return 0; /* failure */
}
@@ -1005,7 +1001,7 @@ static int simple_sleep(struct dvb_frontend *fe)
return 0;
}
-static int simple_release(struct dvb_frontend *fe)
+static void simple_release(struct dvb_frontend *fe)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
@@ -1017,8 +1013,6 @@ static int simple_release(struct dvb_frontend *fe)
mutex_unlock(&tuner_simple_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int simple_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -1077,8 +1071,7 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
fe->ops.i2c_gate_ctrl(fe, 1);
if (1 != i2c_transfer(i2c_adap, &msg, 1))
- printk(KERN_WARNING "tuner-simple %d-%04x: "
- "unable to probe %s, proceeding anyway.",
+ printk(KERN_WARNING "tuner-simple %d-%04x: unable to probe %s, proceeding anyway.",
i2c_adapter_id(i2c_adap), i2c_addr,
tuners[type].name);
@@ -1123,18 +1116,16 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
if ((debug) || ((atv_input[priv->nr] > 0) ||
(dtv_input[priv->nr] > 0))) {
if (0 == atv_input[priv->nr])
- tuner_info("tuner %d atv rf input will be "
- "autoselected\n", priv->nr);
+ tuner_info("tuner %d atv rf input will be autoselected\n",
+ priv->nr);
else
- tuner_info("tuner %d atv rf input will be "
- "set to input %d (insmod option)\n",
+ tuner_info("tuner %d atv rf input will be set to input %d (insmod option)\n",
priv->nr, atv_input[priv->nr]);
if (0 == dtv_input[priv->nr])
- tuner_info("tuner %d dtv rf input will be "
- "autoselected\n", priv->nr);
+ tuner_info("tuner %d dtv rf input will be autoselected\n",
+ priv->nr);
else
- tuner_info("tuner %d dtv rf input will be "
- "set to input %d (insmod option)\n",
+ tuner_info("tuner %d dtv rf input will be set to input %d (insmod option)\n",
priv->nr, dtv_input[priv->nr]);
}
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 8d96a22647b3..b5b62b08159e 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -56,8 +56,7 @@ MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
static char audio_std[8];
module_param_string(audio_std, audio_std, sizeof(audio_std), 0);
MODULE_PARM_DESC(audio_std,
- "Audio standard. XC3028 audio decoder explicitly "
- "needs to know what audio\n"
+ "Audio standard. XC3028 audio decoder explicitly needs to know what audio\n"
"standard is needed for some video standards with audio A2 or NICAM.\n"
"The valid values are:\n"
"A2\n"
@@ -69,8 +68,8 @@ MODULE_PARM_DESC(audio_std,
static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
- "default firmware name\n");
+MODULE_PARM_DESC(firmware_name,
+ "Firmware file name. Allows overriding the default firmware name\n");
static LIST_HEAD(hybrid_tuner_instance_list);
static DEFINE_MUTEX(xc2028_list_mutex);
@@ -179,67 +178,67 @@ static int xc2028_get_reg(struct xc2028_data *priv, u16 reg, u16 *val)
static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
{
if (type & BASE)
- printk("BASE ");
+ printk(KERN_CONT "BASE ");
if (type & INIT1)
- printk("INIT1 ");
+ printk(KERN_CONT "INIT1 ");
if (type & F8MHZ)
- printk("F8MHZ ");
+ printk(KERN_CONT "F8MHZ ");
if (type & MTS)
- printk("MTS ");
+ printk(KERN_CONT "MTS ");
if (type & D2620)
- printk("D2620 ");
+ printk(KERN_CONT "D2620 ");
if (type & D2633)
- printk("D2633 ");
+ printk(KERN_CONT "D2633 ");
if (type & DTV6)
- printk("DTV6 ");
+ printk(KERN_CONT "DTV6 ");
if (type & QAM)
- printk("QAM ");
+ printk(KERN_CONT "QAM ");
if (type & DTV7)
- printk("DTV7 ");
+ printk(KERN_CONT "DTV7 ");
if (type & DTV78)
- printk("DTV78 ");
+ printk(KERN_CONT "DTV78 ");
if (type & DTV8)
- printk("DTV8 ");
+ printk(KERN_CONT "DTV8 ");
if (type & FM)
- printk("FM ");
+ printk(KERN_CONT "FM ");
if (type & INPUT1)
- printk("INPUT1 ");
+ printk(KERN_CONT "INPUT1 ");
if (type & LCD)
- printk("LCD ");
+ printk(KERN_CONT "LCD ");
if (type & NOGD)
- printk("NOGD ");
+ printk(KERN_CONT "NOGD ");
if (type & MONO)
- printk("MONO ");
+ printk(KERN_CONT "MONO ");
if (type & ATSC)
- printk("ATSC ");
+ printk(KERN_CONT "ATSC ");
if (type & IF)
- printk("IF ");
+ printk(KERN_CONT "IF ");
if (type & LG60)
- printk("LG60 ");
+ printk(KERN_CONT "LG60 ");
if (type & ATI638)
- printk("ATI638 ");
+ printk(KERN_CONT "ATI638 ");
if (type & OREN538)
- printk("OREN538 ");
+ printk(KERN_CONT "OREN538 ");
if (type & OREN36)
- printk("OREN36 ");
+ printk(KERN_CONT "OREN36 ");
if (type & TOYOTA388)
- printk("TOYOTA388 ");
+ printk(KERN_CONT "TOYOTA388 ");
if (type & TOYOTA794)
- printk("TOYOTA794 ");
+ printk(KERN_CONT "TOYOTA794 ");
if (type & DIBCOM52)
- printk("DIBCOM52 ");
+ printk(KERN_CONT "DIBCOM52 ");
if (type & ZARLINK456)
- printk("ZARLINK456 ");
+ printk(KERN_CONT "ZARLINK456 ");
if (type & CHINA)
- printk("CHINA ");
+ printk(KERN_CONT "CHINA ");
if (type & F6MHZ)
- printk("F6MHZ ");
+ printk(KERN_CONT "F6MHZ ");
if (type & INPUT2)
- printk("INPUT2 ");
+ printk(KERN_CONT "INPUT2 ");
if (type & SCODE)
- printk("SCODE ");
+ printk(KERN_CONT "SCODE ");
if (type & HAS_IF)
- printk("HAS_IF_%d ", int_freq);
+ printk(KERN_CONT "HAS_IF_%d ", int_freq);
}
static v4l2_std_id parse_audio_std_option(void)
@@ -351,8 +350,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
n++;
if (n >= n_array) {
- tuner_err("More firmware images in file than "
- "were expected!\n");
+ tuner_err("More firmware images in file than were expected!\n");
goto corrupt;
}
@@ -379,8 +377,8 @@ static int load_all_firmwares(struct dvb_frontend *fe,
if (!size || size > endp - p) {
tuner_err("Firmware type ");
dump_firm_type(type);
- printk("(%x), id %llx is corrupted "
- "(size=%d, expected %d)\n",
+ printk(KERN_CONT
+ "(%x), id %llx is corrupted (size=%d, expected %d)\n",
type, (unsigned long long)id,
(unsigned)(endp - p), size);
goto corrupt;
@@ -395,7 +393,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
tuner_dbg("Reading firmware type ");
if (debug) {
dump_firm_type_and_int_freq(type, int_freq);
- printk("(%x), id %llx, size=%d.\n",
+ printk(KERN_CONT "(%x), id %llx, size=%d.\n",
type, (unsigned long long)id, size);
}
@@ -444,7 +442,8 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
tuner_dbg("%s called, want type=", __func__);
if (debug) {
dump_firm_type(type);
- printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ type, (unsigned long long)*id);
}
if (!priv->firm) {
@@ -500,10 +499,11 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
}
if (best_nr_matches > 0) {
- tuner_dbg("Selecting best matching firmware (%d bits) for "
- "type=", best_nr_matches);
+ tuner_dbg("Selecting best matching firmware (%d bits) for type=",
+ best_nr_matches);
dump_firm_type(type);
- printk("(%x), id %016llx:\n", type, (unsigned long long)*id);
+ printk(KERN_CONT
+ "(%x), id %016llx:\n", type, (unsigned long long)*id);
i = best_i;
goto found;
}
@@ -520,7 +520,8 @@ ret:
tuner_dbg("%s firmware for type=", (i < 0) ? "Can't find" : "Found");
if (debug) {
dump_firm_type(type);
- printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ type, (unsigned long long)*id);
}
return i;
}
@@ -560,8 +561,8 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
tuner_info("Loading firmware for type=");
dump_firm_type(priv->firm[pos].type);
- printk("(%x), id %016llx.\n", priv->firm[pos].type,
- (unsigned long long)*id);
+ printk(KERN_CONT "(%x), id %016llx.\n",
+ priv->firm[pos].type, (unsigned long long)*id);
p = priv->firm[pos].ptr;
endp = p + priv->firm[pos].size;
@@ -694,7 +695,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
tuner_info("Loading SCODE for type=");
dump_firm_type_and_int_freq(priv->firm[pos].type,
priv->firm[pos].int_freq);
- printk("(%x), id %016llx.\n", priv->firm[pos].type,
+ printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
(unsigned long long)*id);
if (priv->firm_version < 0x0202)
@@ -746,15 +747,15 @@ retry:
tuner_dbg("checking firmware, user requested type=");
if (debug) {
dump_firm_type(new_fw.type);
- printk("(%x), id %016llx, ", new_fw.type,
+ printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
(unsigned long long)new_fw.std_req);
if (!int_freq) {
- printk("scode_tbl ");
+ printk(KERN_CONT "scode_tbl ");
dump_firm_type(priv->ctrl.scode_table);
- printk("(%x), ", priv->ctrl.scode_table);
+ printk(KERN_CONT "(%x), ", priv->ctrl.scode_table);
} else
- printk("int_freq %d, ", new_fw.int_freq);
- printk("scode_nr %d\n", new_fw.scode_nr);
+ printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
+ printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
}
/*
@@ -842,8 +843,7 @@ check_device:
goto fail;
}
- tuner_dbg("Device is Xceive %d version %d.%d, "
- "firmware version %d.%d\n",
+ tuner_dbg("Device is Xceive %d version %d.%d, firmware version %d.%d\n",
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
@@ -857,8 +857,7 @@ check_device:
tuner_err("Incorrect readback of firmware version.\n");
goto fail;
} else {
- tuner_err("Returned an incorrect version. However, "
- "read is not reliable enough. Ignoring it.\n");
+ tuner_err("Returned an incorrect version. However, read is not reliable enough. Ignoring it.\n");
hwmodel = 3028;
}
}
@@ -869,8 +868,7 @@ check_device:
priv->hwvers = version & 0xff00;
} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
priv->hwvers != (version & 0xff00)) {
- tuner_err("Read invalid device hardware information - tuner "
- "hung?\n");
+ tuner_err("Read invalid device hardware information - tuner hung?\n");
goto fail;
}
@@ -1327,7 +1325,7 @@ static int xc2028_sleep(struct dvb_frontend *fe)
return rc;
}
-static int xc2028_dvb_release(struct dvb_frontend *fe)
+static void xc2028_dvb_release(struct dvb_frontend *fe)
{
struct xc2028_data *priv = fe->tuner_priv;
@@ -1345,8 +1343,6 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
mutex_unlock(&xc2028_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int xc2028_get_frequency(struct dvb_frontend *fe, u32 *frequency)
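The long run of printk() changes above adds KERN_CONT to fragments that continue an already-started log line; without it, each call would begin a new line since the printk rework. A small sketch of the pattern, with hypothetical flag values:

#include <linux/printk.h>

static void demo_print_flags(unsigned int type)
{
        pr_info("firmware type:");              /* opens the line */
        if (type & 0x01)
                printk(KERN_CONT " BASE");      /* appended, not a new line */
        if (type & 0x02)
                printk(KERN_CONT " MTS");
        printk(KERN_CONT "\n");                 /* terminates the line */
}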
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d95c7e082ccf..03eef9b87a24 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -43,14 +43,11 @@ MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
static int no_poweroff;
module_param(no_poweroff, int, 0644);
-MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
- "0 (default): use device-specific default mode).");
+MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, 0 (default): use device-specific default mode).");
static int audio_std;
module_param(audio_std, int, 0644);
-MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
- "needs to know what audio standard is needed for some video standards "
- "with audio A2 or NICAM. The valid settings are a sum of:\n"
+MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly needs to know what audio standard is needed for some video standards with audio A2 or NICAM. The valid settings are a sum of:\n"
" 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
" 2: use A2 instead of NICAM or BTSC\n"
" 4: use SECAM/K3 instead of K1\n"
@@ -60,8 +57,7 @@ MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
static char firmware_name[30];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
- "default firmware name.");
+MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the default firmware name.");
static DEFINE_MUTEX(xc4000_list_mutex);
static LIST_HEAD(hybrid_tuner_instance_list);
@@ -290,8 +286,7 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe)
return -EREMOTEIO;
}
} else {
- printk(KERN_ERR "xc4000: no tuner reset callback function, "
- "fatal\n");
+ printk(KERN_ERR "xc4000: no tuner reset callback function, fatal\n");
return -EINVAL;
}
return 0;
@@ -679,8 +674,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
if (best_nr_diffs > 0U) {
printk(KERN_WARNING
- "Selecting best matching firmware (%u bits differ) for "
- "type=(%x), id %016llx:\n",
+ "Selecting best matching firmware (%u bits differ) for type=(%x), id %016llx:\n",
best_nr_diffs, type, (unsigned long long)*id);
i = best_i;
}
@@ -800,8 +794,7 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
n++;
if (n >= n_array) {
- printk(KERN_ERR "More firmware images in file than "
- "were expected!\n");
+ printk(KERN_ERR "More firmware images in file than were expected!\n");
goto corrupt;
}
@@ -1055,8 +1048,7 @@ check_device:
goto fail;
}
- dprintk(1, "Device is Xceive %d version %d.%d, "
- "firmware version %d.%d\n",
+ dprintk(1, "Device is Xceive %d version %d.%d, firmware version %d.%d\n",
hwmodel, hw_major, hw_minor, fw_major, fw_minor);
/* Check firmware version against what we downloaded. */
@@ -1076,8 +1068,7 @@ check_device:
} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
priv->hwvers != ((hw_major << 8) | hw_minor)) {
printk(KERN_WARNING
- "Read invalid device hardware information - tuner "
- "hung?\n");
+ "Read invalid device hardware information - tuner hung?\n");
goto fail;
}
@@ -1627,7 +1618,7 @@ static int xc4000_init(struct dvb_frontend *fe)
return 0;
}
-static int xc4000_release(struct dvb_frontend *fe)
+static void xc4000_release(struct dvb_frontend *fe)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1641,8 +1632,6 @@ static int xc4000_release(struct dvb_frontend *fe)
mutex_unlock(&xc4000_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static const struct dvb_tuner_ops xc4000_tuner_ops = {
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e6e5e90d8d95..796e7638b3b2 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1326,7 +1326,7 @@ static int xc5000_init(struct dvb_frontend *fe)
return 0;
}
-static int xc5000_release(struct dvb_frontend *fe)
+static void xc5000_release(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1346,8 +1346,6 @@ static int xc5000_release(struct dvb_frontend *fe)
mutex_unlock(&xc5000_list_mutex);
fe->tuner_priv = NULL;
-
- return 0;
}
static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 7496f332f3f5..c9644b62f91a 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -60,5 +60,10 @@ source "drivers/media/usb/hackrf/Kconfig"
source "drivers/media/usb/msi2500/Kconfig"
endif
+if MEDIA_CEC_SUPPORT
+ comment "USB HDMI CEC adapters"
+source "drivers/media/usb/pulse8-cec/Kconfig"
+endif
+
endif #MEDIA_USB_SUPPORT
endif #USB
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 8874ba774a34..0f15e3351ddc 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 85dd9a8e83ff..7a10eaa38f67 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -253,8 +253,7 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
- printk("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
+ printk("unable to allocate %i bytes for transfer buffer %i%s\n",
sb_size, i,
in_interrupt() ? " while in int" : "");
au0828_uninit_isoc(dev);
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 52bc42da8a4c..788c73803138 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -33,8 +33,7 @@
static int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,"
- "ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
+MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
#undef DEBSTATUS
#define deb_info(args...) dprintk(0x01, args)
@@ -433,8 +432,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
frame_size, i, j, ret;
int buffer_offset = 0;
- deb_ts("creating %d iso-urbs with %d frames "
- "each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
+ deb_ts("creating %d iso-urbs with %d frames each of %d bytes size = %d.\n",
+ B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
@@ -459,8 +458,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) {
int frame_offset = 0;
struct urb *urb = fc_usb->iso_urb[i];
- deb_ts("initializing and submitting urb no. %d "
- "(buf_offset: %d).\n", i, buffer_offset);
+ deb_ts("initializing and submitting urb no. %d (buf_offset: %d).\n",
+ i, buffer_offset);
urb->dev = fc_usb->udev;
urb->context = fc_usb;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index e9100a235831..37f9b30b0abc 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -759,9 +759,7 @@ int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate)
cam->params.camera_state.stream_mode = old_alt;
ret2 = set_alternate(cam, USBIF_CMDONLY);
if (ret2 < 0) {
- ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already "
- "failed. Then tried to call "
- "set_alternate(USBIF_CMDONLY) = %d.\n",
+ ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already failed. Then tried to call set_alternate(USBIF_CMDONLY) = %d.\n",
alternate, ret, ret2);
}
} else {
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8b099fe1d592..550ec932f931 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -241,8 +241,7 @@ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe,
int rc, i;
if (reg_debug) {
- printk(KERN_DEBUG "%s: (pipe 0x%08x): "
- "%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ printk(KERN_DEBUG "%s: (pipe 0x%08x): %s: %02x %02x %02x %02x %02x %02x %02x %02x ",
dev->name,
pipe,
(requesttype & USB_DIR_IN) ? "IN" : "OUT",
@@ -441,8 +440,7 @@ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
if (reg_debug) {
int byte;
- cx231xx_isocdbg("(pipe 0x%08x): "
- "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
+ cx231xx_isocdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
pipe,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
req, 0, val, reg & 0xff,
@@ -600,8 +598,8 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
return -1;
}
- cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,"
- "Interface = %d\n", alt, max_pkt_size,
+ cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,Interface = %d\n",
+ alt, max_pkt_size,
usb_interface_index);
if (usb_interface_index > 0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 1417515d30eb..2868546999ca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -377,8 +377,8 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
cfg.i2c_addr = addr;
if (!dev->dvb->frontend) {
- dev_err(dev->dev, "%s/2: dvb frontend not attached. "
- "Can't attach xc5000\n", dev->name);
+ dev_err(dev->dev, "%s/2: dvb frontend not attached. Can't attach xc5000\n",
+ dev->name);
return -EINVAL;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff9b268..29011dfabb11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
- &af9015_props, "KWorld Digial MC-810", NULL) },
+ &af9015_props, "KWorld Digital MC-810", NULL) },
{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
&af9015_props, "Genius TVGo DVB-T03", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8961dd732522..c673726d9b70 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -2095,6 +2095,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, 0x0337,
&af9035_props, "AVerMedia HD Volar (A867)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_EVOLVEO_XTRATV_STICK,
+ &af9035_props, "EVOLVEO XtraTV stick", NULL) },
/* IT9135 devices */
{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 02dbc6c45423..0636eac37bbb 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -851,6 +851,10 @@ static const struct usb_device_id dvbsky_id_table[] = {
USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
&dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_S2_4650_CI,
+ &dvbsky_s960c_props, "TechnoTrend TT-connect S2-4650 CI",
+ RC_MAP_TT_1500) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_H7_3,
&dvbsky_t680c_props, "Terratec H7 Rev.4",
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 0e8fb89896c4..5fea02672685 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -156,21 +156,19 @@ struct lme2510_state {
static int lme2510_bulk_write(struct usb_device *dev,
u8 *snd, int len, u8 pipe)
{
- int ret, actual_l;
+ int actual_l;
- ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
- snd, len , &actual_l, 100);
- return ret;
+ return usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len, &actual_l, 100);
}
static int lme2510_bulk_read(struct usb_device *dev,
u8 *rev, int len, u8 pipe)
{
- int ret, actual_l;
+ int actual_l;
- ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
- rev, len , &actual_l, 200);
- return ret;
+ return usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len, &actual_l, 200);
}
static int lme2510_usb_talk(struct dvb_usb_device *d,
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 047a32fe43ea..639e156e0c1b 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -549,7 +549,7 @@ static void mxl111sf_demod_release(struct dvb_frontend *fe)
fe->demodulator_priv = NULL;
}
-static struct dvb_frontend_ops mxl111sf_demod_ops = {
+static const struct dvb_frontend_ops mxl111sf_demod_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "MaxLinear MxL111SF DVB-T demodulator",
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 283495c84ba3..6427137a09ef 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -666,8 +666,8 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
if (rd_status[i] == 0x04) {
if (i < 7) {
- mxl_i2c("i2c fifo empty!"
- " @ %d", i);
+ mxl_i2c("i2c fifo empty! @ %d",
+ i);
msg->buf[(index*8)+i] =
i2c_r_data[(i*3)+1];
/* read again */
@@ -692,8 +692,7 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
}
goto stop_copy;
} else {
- mxl_i2c("readagain "
- "ERROR!");
+ mxl_i2c("readagain ERROR!");
}
} else {
msg->buf[(index*8)+i] =
@@ -827,9 +826,8 @@ int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) :
mxl111sf_i2c_sw_xfer_msg(state, &msg[i]);
if (mxl_fail(ret)) {
- mxl_debug_adv("failed with error %d on i2c "
- "transaction %d of %d, %sing %d bytes "
- "to/from 0x%02x", ret, i+1, num,
+ mxl_debug_adv("failed with error %d on i2c transaction %d of %d, %sing %d bytes to/from 0x%02x",
+ ret, i+1, num,
(msg[i].flags & I2C_M_RD) ?
"read" : "writ",
msg[i].len, msg[i].addr);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index f141dcc55cc9..f84bef6034dc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -455,13 +455,12 @@ static int mxl111sf_tuner_get_if_frequency(struct dvb_frontend *fe,
return 0;
}
-static int mxl111sf_tuner_release(struct dvb_frontend *fe)
+static void mxl111sf_tuner_release(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
mxl_dbg("()");
kfree(state);
fe->tuner_priv = NULL;
- return 0;
}
/* ------------------------------------------------------------------------- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 5d676b533a3a..80c635980526 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -29,8 +29,7 @@
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level "
- "(1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
static int dvb_usb_mxl111sf_isoc;
module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
@@ -137,8 +136,8 @@ int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
#if 1
/* dont know why this usually errors out on the first try */
if (mxl_fail(ret))
- pr_err("error writing addr: 0x%02x, mask: 0x%02x, "
- "data: 0x%02x, retrying...", addr, mask, data);
+ pr_err("error writing addr: 0x%02x, mask: 0x%02x, data: 0x%02x, retrying...",
+ addr, mask, data);
ret = mxl111sf_read_reg(state, addr, &val);
#endif
@@ -946,8 +945,7 @@ static int mxl111sf_init(struct dvb_usb_device *d)
case 138001:
break;
default:
- printk(KERN_WARNING "%s: warning: "
- "unknown hauppauge model #%d\n",
+ printk(KERN_WARNING "%s: warning: unknown hauppauge model #%d\n",
__func__, state->tv.model);
}
#endif
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 09db3d02bd82..9862d3e6b8e8 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1430,7 +1430,7 @@ static void af9005_fe_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops af9005_fe_ops;
+static const struct dvb_frontend_ops af9005_fe_ops;
struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
{
@@ -1455,7 +1455,7 @@ struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
return NULL;
}
-static struct dvb_frontend_ops af9005_fe_ops = {
+static const struct dvb_frontend_ops af9005_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "AF9005 USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 7853261906b1..f5f476841aea 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -826,7 +826,6 @@ static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
printk("EEPROM DUMP\n");
for (i = 0; i < 255; i += 8) {
af9005_read_eeprom(adap->dev, i, buf, 8);
- printk("ADDR %x ", i);
debug_dump(buf, 8, printk);
}
}
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 290275bc7fde..6404205560eb 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -34,8 +34,7 @@
int dvb_usb_cinergyt2_debug;
module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 "
- "(or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 (or-able)).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -93,8 +92,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
- deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
- "state info\n");
+ deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
}
mutex_unlock(&d->data_mutex);
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index 2d29b4174dba..bbb10fab65bc 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -278,7 +278,7 @@ static void cinergyt2_fe_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cinergyt2_fe_ops;
+static const struct dvb_frontend_ops cinergyt2_fe_ops;
struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
{
@@ -295,7 +295,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
}
-static struct dvb_frontend_ops cinergyt2_fe_ops = {
+static const struct dvb_frontend_ops cinergyt2_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = DRIVER_NAME,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 243403081fa5..9b8771eb31d4 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -369,6 +369,26 @@ static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
+static int cxusb_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct dvb_usb_adapter *adap = (struct dvb_usb_adapter *)fe->dvb->priv;
+ struct cxusb_state *state = (struct cxusb_state *)adap->dev->priv;
+ int ret;
+
+ ret = state->fe_read_status(fe, status);
+
+ /* it need resync slave fifo when signal change from unlock to lock.*/
+ /* resync the slave fifo when the signal changes from unlock to lock. */
+ if ((*status & FE_HAS_LOCK) && (!state->last_lock)) {
+ mutex_lock(&state->stream_mutex);
+ cxusb_streaming_ctrl(adap, 1);
+ mutex_unlock(&state->stream_mutex);
+ }
+
+ state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
+ return ret;
+}
+
static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
{
int ep = d->props.generic_bulk_ctrl_endpoint;
@@ -1372,6 +1392,12 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
st->i2c_client_tuner = client_tuner;
+ /* hook fe: need to resync the slave fifo when signal locks. */
+ mutex_init(&st->stream_mutex);
+ st->last_lock = 0;
+ st->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = cxusb_read_status;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 18acda19527a..66429d7f69b5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -37,6 +37,11 @@ struct cxusb_state {
struct i2c_client *i2c_client_tuner;
unsigned char data[MAX_XFER_SIZE];
+
+ struct mutex stream_mutex;
+ u8 last_lock;
+ int (*fe_read_status)(struct dvb_frontend *fe,
+ enum fe_status *status);
};
#endif
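The cxusb change above interposes on the frontend's read_status: the original pointer is stashed in the driver state, and the wrapper resynchronizes the streaming FIFO only on an unlock-to-lock transition. A hedged sketch of that interposition pattern; demo_state and the use of demodulator_priv are assumptions made for illustration:

#include <linux/types.h>
#include "dvb_frontend.h"

struct demo_state {
        u8 last_lock;
        int (*orig_read_status)(struct dvb_frontend *fe,
                                enum fe_status *status);
};

static int demo_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
        struct demo_state *st = fe->demodulator_priv;   /* assumed storage */
        int ret = st->orig_read_status(fe, status);     /* chain to the original */

        if ((*status & FE_HAS_LOCK) && !st->last_lock) {
                /* unlock -> lock edge: resync the slave fifo here */
        }

        st->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
        return ret;
}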
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5de4c6..dd5edd3a17ee 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -16,10 +16,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=fw,4=fwdata,8=data (or-ab
static int nb_packet_buffer_size = 21;
module_param(nb_packet_buffer_size, int, 0644);
MODULE_PARM_DESC(nb_packet_buffer_size,
- "Set the dib0700 driver data buffer size. This parameter "
- "corresponds to the number of TS packets. The actual size of "
- "the data buffer corresponds to this parameter "
- "multiplied by 188 (default: 21)");
+ "Set the dib0700 driver data buffer size. This parameter corresponds to the number of TS packets. The actual size of the data buffer corresponds to this parameter multiplied by 188 (default: 21)");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ef1b8ee75c57..b29d4894c2f1 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -26,8 +26,7 @@
static int force_lna_activation;
module_param(force_lna_activation, int, 0644);
-MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), "
- "if applicable for the device (default: 0=automatic/off).");
+MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), if applicable for the device (default: 0=automatic/off).");
struct dib0700_adapter_state {
int (*set_param_save) (struct dvb_frontend *);
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index de3ee2547479..8207e6900656 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -382,9 +382,9 @@ int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
if (buf[0] != 0)
deb_info("key: %*ph\n", 5, buf);
+ret:
kfree(buf);
-ret:
return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
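Moving the ret: label above kfree() in dibusb_rc_query() means the early-exit path now frees the buffer instead of leaking it. A self-contained sketch of the goto-cleanup idiom; demo_io() is a hypothetical helper standing in for the real USB transfer:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_io(u8 *buf);    /* hypothetical I/O helper */

static int demo_query(void)
{
        int ret;
        u8 *buf = kmalloc(5, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        ret = demo_io(buf);
        if (ret < 0)
                goto out;       /* still runs kfree() below */

        /* ... decode buf ... */
        ret = 0;
out:
        kfree(buf);             /* reached on both success and error */
        return ret;
}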
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56cc46a5..c989cac9343d 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -9,7 +9,6 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
-#include <linux/kconfig.h>
#include "dibusb.h"
/* 3000MC/P stuff */
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index f5c042baa254..00f565fe7cc2 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -202,7 +202,7 @@ static void dtt200u_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops dtt200u_fe_ops;
+static const struct dvb_frontend_ops dtt200u_fe_ops;
struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
{
@@ -226,7 +226,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops dtt200u_fe_ops = {
+static const struct dvb_frontend_ops dtt200u_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "WideView USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index a04c0a250625..e5675da286cb 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -277,8 +277,7 @@ int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
for (i = 0; i < adap->props.num_frontends; i++) {
if (adap->props.fe[i].frontend_attach == NULL) {
- err("strange: '%s' #%d,%d "
- "doesn't want to attach a frontend.",
+ err("strange: '%s' #%d,%d doesn't want to attach a frontend.",
adap->dev->desc->name, adap->id, i);
return 0;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index dd048a7c461c..f0023dbb7276 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -49,8 +49,7 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
if (ret != hx.len) {
- err("error while transferring firmware "
- "(transferred size: %d, block size: %d)",
+ err("error while transferring firmware (transferred size: %d, block size: %d)",
ret,hx.len);
ret = -EINVAL;
break;
@@ -81,8 +80,7 @@ int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_pro
const struct firmware *fw = NULL;
if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+ err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
props->firmware,ret);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 107255b08b2b..67f898b6f6d0 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -467,8 +467,10 @@ extern int dvb_usb_device_init(struct usb_interface *,
extern void dvb_usb_device_exit(struct usb_interface *);
/* the generic read/write method for device control */
-extern int dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16,int);
-extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
+extern int __must_check
+dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16, int);
+extern int __must_check
+dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
/* commonly used remote control parsing */
extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
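Annotating dvb_usb_generic_rw()/write() with __must_check makes the compiler warn whenever a caller discards the error code. A minimal illustration with a hypothetical helper:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

/* demo_usb_write() stands in for the generic write helper; __must_check
 * expands to __attribute__((warn_unused_result)). */
static int __must_check demo_usb_write(const u8 *buf, u16 len)
{
        return (buf && len) ? 0 : -EINVAL;
}

/*
 *   err = demo_usb_write(buf, len);   -- fine
 *   demo_usb_write(buf, len);         -- now produces a compiler warning
 */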
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2c720cb2fb00..6ca502d834b4 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -86,8 +86,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
/* demod probe */
static int demod_probe = 1;
module_param_named(demod, demod_probe, int, 0644);
-MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
- "4=stv0903+stb6100(or-able)).");
+MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+stb6100(or-able)).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -1642,6 +1641,7 @@ enum dw2102_table_entry {
TEVII_S632,
TERRATEC_CINERGY_S2_R2,
TERRATEC_CINERGY_S2_R3,
+ TERRATEC_CINERGY_S2_R4,
GOTVIEW_SAT_HD,
GENIATECH_T220,
TECHNOTREND_S2_4600,
@@ -1671,6 +1671,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
[TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R3)},
+ [TERRATEC_CINERGY_S2_R4] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
@@ -2343,12 +2344,7 @@ static struct usb_driver dw2102_driver = {
module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
-MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
- " DVB-C 3101 USB2.0,"
- " TeVii S421, S480, S482, S600, S630, S632, S650,"
- " TeVii S660, S662, Prof 1100, 7500 USB2.0,"
- " Geniatech SU3000, T220,"
- " TechnoTrend S2-4600, Terratec Cinergy S2 devices");
+MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101 USB2.0, TeVii S421, S480, S482, S600, S630, S632, S650, TeVii S660, S662, Prof 1100, 7500 USB2.0, Geniatech SU3000, T220, TechnoTrend S2-4600, Terratec Cinergy S2 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 979f05b4b87c..0251a4e91d47 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -401,7 +401,7 @@ static void jdvbt90502_release(struct dvb_frontend *fe)
}
-static struct dvb_frontend_ops jdvbt90502_ops;
+static const struct dvb_frontend_ops jdvbt90502_ops;
struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
{
@@ -432,7 +432,7 @@ error:
return NULL;
}
-static struct dvb_frontend_ops jdvbt90502_ops = {
+static const struct dvb_frontend_ops jdvbt90502_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Comtech JDVBT90502 ISDB-T",
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
index 474a17e4db0c..62abe6c43a32 100644
--- a/drivers/media/usb/dvb-usb/friio.c
+++ b/drivers/media/usb/dvb-usb/friio.c
@@ -320,8 +320,8 @@ restart:
*/
if (rbuf[0] & 0x80) { /* still in PowerOnReset state? */
if (++retry > 3) {
- deb_info("failed to get the correct"
- " FE demod status:0x%02x\n", rbuf[0]);
+ deb_info("failed to get the correct FE demod status:0x%02x\n",
+ rbuf[0]);
goto error;
}
msleep(100);
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 993bb7a72985..2360e7e32b06 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -135,8 +135,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
u8 *buf;
if ((ret = request_firmware(&fw, bcm4500_firmware,
&d->udev->dev)) != 0) {
- err("did not find the bcm4500 firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+ err("did not find the bcm4500 firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
bcm4500_firmware,ret);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index eafc5c82467f..70672e1e5ec7 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -55,13 +55,9 @@ static inline int m920x_read(struct usb_device *udev, u8 request, u16 value,
static inline int m920x_write(struct usb_device *udev, u8 request,
u16 value, u16 index)
{
- int ret;
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- request, USB_TYPE_VENDOR | USB_DIR_OUT,
- value, index, NULL, 0, 2000);
-
- return ret;
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
+ USB_TYPE_VENDOR | USB_DIR_OUT, value, index,
+ NULL, 0, 2000);
}
static inline int m920x_write_seq(struct usb_device *udev, u8 request,
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 2566d2f1c2ad..946a5ccc8f1a 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -453,8 +453,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
info("start downloading fpga firmware %s",filename);
if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+ err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
filename);
return ret;
} else {
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 4706628a3ed5..02c3bee6f83b 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -50,8 +50,7 @@ MODULE_PARM_DESC(debug,
static int disable_led_control;
module_param(disable_led_control, int, 0444);
MODULE_PARM_DESC(disable_led_control,
- "disable LED control of the device "
- "(default: 0 - LED control is active).");
+ "disable LED control of the device (default: 0 - LED control is active).");
/* device private data */
struct technisat_usb2_state {
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index 27398c08c69d..7ff31baa3682 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -323,7 +323,7 @@ static void vp702x_fe_release(struct dvb_frontend* fe)
kfree(st);
}
-static struct dvb_frontend_ops vp702x_fe_ops;
+static const struct dvb_frontend_ops vp702x_fe_ops;
struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
{
@@ -345,7 +345,7 @@ error:
}
-static struct dvb_frontend_ops vp702x_fe_ops = {
+static const struct dvb_frontend_ops vp702x_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index 7765602ea658..4520ad9c2014 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -140,7 +140,7 @@ static void vp7045_fe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops vp7045_fe_ops;
+static const struct dvb_frontend_ops vp7045_fe_ops;
struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
{
@@ -158,7 +158,7 @@ error:
}
-static struct dvb_frontend_ops vp7045_fe_ops = {
+static const struct dvb_frontend_ops vp7045_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index d917b0a2beb1..aa131cf9989b 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_EM28XX_V4L2
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
---help---
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index e11fe46a547c..7969ddb9e2dd 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007-2014 Mauro Carvalho Chehab
+ * Copyright (C) 2007-2016 Mauro Carvalho Chehab
* - Port to work with the in-kernel driver
* - Cleanups, fixes, alsa-controls, etc.
*
@@ -25,6 +25,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/init.h>
@@ -44,7 +46,6 @@
#include <sound/tlv.h>
#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
static int debug;
module_param(debug, int, 0644);
@@ -54,10 +55,10 @@ MODULE_PARM_DESC(debug, "activates debug info");
#define EM28XX_MIN_AUDIO_PACKETS 64
#define dprintk(fmt, arg...) do { \
- if (debug) \
- printk(KERN_INFO "em28xx-audio %s: " fmt, \
- __func__, ##arg); \
- } while (0)
+ if (debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "video: %s: " fmt, __func__, ## arg); \
+} while (0)
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -91,7 +92,8 @@ static void em28xx_audio_isocirq(struct urb *urb)
struct snd_pcm_runtime *runtime;
if (dev->disconnected) {
- dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+ dprintk("device disconnected while streaming. URB status=%d.\n",
+ urb->status);
atomic_set(&dev->adev.stream_started, 0);
return;
}
@@ -164,8 +166,9 @@ static void em28xx_audio_isocirq(struct urb *urb)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0)
- em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
- status);
+ dev_err(&dev->intf->dev,
+ "resubmit of audio urb failed (error=%i)\n",
+ status);
return;
}
@@ -182,8 +185,9 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
- em28xx_errdev("submit of audio urb failed (error=%i)\n",
- errCode);
+ dev_err(&dev->intf->dev,
+ "submit of audio urb failed (error=%i)\n",
+ errCode);
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->adev.stream_started, 0);
return errCode;
@@ -197,6 +201,7 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
size_t size)
{
+ struct em28xx *dev = snd_pcm_substream_chip(subs);
struct snd_pcm_runtime *runtime = subs->runtime;
dprintk("Allocating vbuffer\n");
@@ -254,8 +259,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
int nonblock, ret = 0;
if (!dev) {
- em28xx_err("BUG: em28xx can't find device struct."
- " Can't proceed with open\n");
+ pr_err("em28xx-audio: BUG: em28xx can't find device struct. Can't proceed with open\n");
return -ENODEV;
}
@@ -275,6 +279,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
if (dev->adev.users == 0) {
if (dev->alt == 0 || dev->is_audio_only) {
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+
if (dev->is_audio_only)
/* audio is on a separate interface */
dev->alt = 1;
@@ -292,7 +298,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
*/
dprintk("changing alternate number on interface %d to %d\n",
dev->ifnum, dev->alt);
- usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ usb_set_interface(udev, dev->ifnum, dev->alt);
}
/* Sets volume, mute, etc */
@@ -318,7 +324,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
err:
mutex_unlock(&dev->lock);
- em28xx_err("Error while configuring em28xx mixer\n");
+ dev_err(&dev->intf->dev,
+ "Error while configuring em28xx mixer\n");
return ret;
}
@@ -709,6 +716,7 @@ static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
static void em28xx_audio_free_urb(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
for (i = 0; i < dev->adev.num_urb; i++) {
@@ -717,7 +725,7 @@ static void em28xx_audio_free_urb(struct em28xx *dev)
if (!urb)
continue;
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ usb_free_coherent(udev, urb->transfer_buffer_length,
dev->adev.transfer_buffer[i],
urb->transfer_dma);
@@ -744,6 +752,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
{
struct usb_interface *intf;
struct usb_endpoint_descriptor *e, *ep = NULL;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i, ep_size, interval, num_urb, npackets;
int urb_size, bytes_per_transfer;
u8 alt;
@@ -753,10 +762,10 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
else
alt = 7;
- intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+ intf = usb_ifnum_to_if(udev, dev->ifnum);
if (intf->num_altsetting <= alt) {
- em28xx_errdev("alt %d doesn't exist on interface %d\n",
+ dev_err(&dev->intf->dev, "alt %d doesn't exist on interface %d\n",
dev->ifnum, alt);
return -ENODEV;
}
@@ -772,18 +781,17 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
}
if (!ep) {
- em28xx_errdev("Couldn't find an audio endpoint");
+ dev_err(&dev->intf->dev, "Couldn't find an audio endpoint");
return -ENODEV;
}
- ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+ ep_size = em28xx_audio_ep_packet_size(udev, ep);
interval = 1 << (ep->bInterval - 1);
- em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
- EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
- dev->ifnum, alt,
- interval,
- ep_size);
+ dev_info(&dev->intf->dev,
+ "Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+ EM28XX_EP_AUDIO, usb_speed_string(udev->speed),
+ dev->ifnum, alt, interval, ep_size);
/* Calculate the number and size of URBs to better fit the audio samples */
@@ -820,8 +828,9 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
if (urb_size > ep_size * npackets)
npackets = DIV_ROUND_UP(urb_size, ep_size);
- em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
- num_urb, npackets, urb_size);
+ dev_info(&dev->intf->dev,
+ "Number of URBs: %d, with %d packets and %d size\n",
+ num_urb, npackets, urb_size);
/* Estimate the bytes per period */
dev->adev.period = urb_size * npackets;
@@ -855,18 +864,19 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
}
dev->adev.urb[i] = urb;
- buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+ buf = usb_alloc_coherent(udev, npackets * ep_size, GFP_ATOMIC,
&urb->transfer_dma);
if (!buf) {
- em28xx_errdev("usb_alloc_coherent failed!\n");
+ dev_err(&dev->intf->dev,
+ "usb_alloc_coherent failed!\n");
em28xx_audio_free_urb(dev);
return -ENOMEM;
}
dev->adev.transfer_buffer[i] = buf;
- urb->dev = dev->udev;
+ urb->dev = udev;
urb->context = dev;
- urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+ urb->pipe = usb_rcvisocpipe(udev, EM28XX_EP_AUDIO);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = buf;
urb->interval = interval;
@@ -886,6 +896,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
static int em28xx_audio_init(struct em28xx *dev)
{
struct em28xx_audio *adev = &dev->adev;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct snd_pcm *pcm;
struct snd_card *card;
static int devnr;
@@ -898,23 +909,23 @@ static int em28xx_audio_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Binding audio extension\n");
+ dev_info(&dev->intf->dev, "Binding audio extension\n");
kref_get(&dev->ref);
- printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
- "Rechberger\n");
- printk(KERN_INFO
- "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
+ dev_info(&dev->intf->dev,
+ "em28xx-audio.c: Copyright (C) 2006 Markus Rechberger\n");
+ dev_info(&dev->intf->dev,
+ "em28xx-audio.c: Copyright (C) 2007-2016 Mauro Carvalho Chehab\n");
- err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio",
+ err = snd_card_new(&dev->intf->dev, index[devnr], "Em28xx Audio",
THIS_MODULE, 0, &card);
if (err < 0)
return err;
spin_lock_init(&adev->slock);
adev->sndcard = card;
- adev->udev = dev->udev;
+ adev->udev = udev;
err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
if (err < 0)
@@ -955,7 +966,7 @@ static int em28xx_audio_init(struct em28xx *dev)
if (err < 0)
goto urb_free;
- em28xx_info("Audio extension successfully initialized\n");
+ dev_info(&dev->intf->dev, "Audio extension successfully initialized\n");
return 0;
urb_free:
@@ -980,7 +991,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing audio extension\n");
+ dev_info(&dev->intf->dev, "Closing audio extension\n");
if (dev->adev.sndcard) {
snd_card_disconnect(dev->adev.sndcard);
@@ -1004,7 +1015,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Suspending audio extension\n");
+ dev_info(&dev->intf->dev, "Suspending audio extension\n");
em28xx_deinit_isoc_audio(dev);
atomic_set(&dev->adev.stream_started, 0);
return 0;
@@ -1018,7 +1029,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
- em28xx_info("Resuming audio extension\n");
+ dev_info(&dev->intf->dev, "Resuming audio extension\n");
/* Nothing to do other than schedule_work() ?? */
schedule_work(&dev->adev.wq_trigger);
return 0;
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 72f3f4d50253..89c890ba7dd6 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -19,14 +19,15 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/i2c.h>
+#include <linux/usb.h>
#include <media/soc_camera.h>
#include <media/i2c/mt9v011.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
-
/* Possible i2c addresses of Micron sensors */
static unsigned short micron_sensor_addrs[] = {
0xb8 >> 1, /* MT9V111, MT9V403 */
@@ -120,14 +121,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
ret = i2c_master_send(&client, &reg, 1);
if (ret < 0) {
if (ret != -ENXIO)
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = be16_to_cpu(id_be);
@@ -135,14 +138,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
reg = 0xff;
ret = i2c_master_send(&client, &reg, 1);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
/* Validate chip ID to be sure we have a Micron device */
@@ -180,15 +185,17 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
dev->em28xx_sensor = EM28XX_MT9M001;
break;
default:
- em28xx_info("unknown Micron sensor detected: 0x%04x\n",
- id);
+ dev_info(&dev->intf->dev,
+ "unknown Micron sensor detected: 0x%04x\n", id);
return 0;
}
if (dev->em28xx_sensor == EM28XX_NOSENSOR)
- em28xx_info("unsupported sensor detected: %s\n", name);
+ dev_info(&dev->intf->dev,
+ "unsupported sensor detected: %s\n", name);
else
- em28xx_info("sensor %s detected\n", name);
+ dev_info(&dev->intf->dev,
+ "sensor %s detected\n", name);
dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
@@ -218,16 +225,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
if (ret != -ENXIO)
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x1d;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id += ret;
@@ -238,16 +247,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
reg = 0x0a;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x0b;
ret = i2c_smbus_read_byte_data(&client, reg);
if (ret < 0) {
- em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ dev_err(&dev->intf->dev,
+ "couldn't read from i2c device 0x%02x: error %i\n",
+ client.addr << 1, ret);
continue;
}
id += ret;
@@ -285,15 +296,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
name = "OV9655";
break;
default:
- em28xx_info("unknown OmniVision sensor detected: 0x%04x\n",
- id);
+ dev_info(&dev->intf->dev,
+ "unknown OmniVision sensor detected: 0x%04x\n",
+ id);
return 0;
}
if (dev->em28xx_sensor == EM28XX_NOSENSOR)
- em28xx_info("unsupported sensor detected: %s\n", name);
+ dev_info(&dev->intf->dev,
+ "unsupported sensor detected: %s\n", name);
else
- em28xx_info("sensor %s detected\n", name);
+ dev_info(&dev->intf->dev,
+ "sensor %s detected\n", name);
dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
@@ -317,7 +331,8 @@ int em28xx_detect_sensor(struct em28xx *dev)
*/
if (dev->em28xx_sensor == EM28XX_NOSENSOR && ret < 0) {
- em28xx_info("No sensor detected\n");
+ dev_info(&dev->intf->dev,
+ "No sensor detected\n");
return -ENODEV;
}
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index e397f544f108..23c67494762d 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -23,6 +23,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,7 +41,6 @@
#include <media/v4l2-common.h>
#include <sound/ac97_codec.h>
-#include "em28xx.h"
#define DRIVER_NAME "em28xx"
@@ -1560,8 +1561,7 @@ struct em28xx_board em28xx_boards[] = {
} },
},
[EM2820_BOARD_PINNACLE_DVC_90] = {
- .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker "
- "/ Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
+ .name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
.tuner_type = TUNER_ABSENT, /* capture only board */
.decoder = EM28XX_SAA711X,
.input = { {
@@ -2677,7 +2677,7 @@ static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
msleep(50);
}
- em28xx_warn("AC97 registers access is not reliable !\n");
+ dev_warn(&dev->intf->dev, "AC97 registers access is not reliable !\n");
return -ETIMEDOUT;
}
@@ -2831,16 +2831,14 @@ static int em28xx_hint_board(struct em28xx *dev)
dev->model = em28xx_eeprom_hash[i].model;
dev->tuner_type = em28xx_eeprom_hash[i].tuner;
- em28xx_errdev("Your board has no unique USB ID.\n");
- em28xx_errdev("A hint were successfully done, "
- "based on eeprom hash.\n");
- em28xx_errdev("This method is not 100%% failproof.\n");
- em28xx_errdev("If the board were missdetected, "
- "please email this log to:\n");
- em28xx_errdev("\tV4L Mailing List "
- " <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board detected as %s\n",
- em28xx_boards[dev->model].name);
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID.\n"
+ "A hint were successfully done, based on eeprom hash.\n"
+ "This method is not 100%% failproof.\n"
+ "If the board were missdetected, please email this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board detected as %s\n",
+ em28xx_boards[dev->model].name);
return 0;
}
@@ -2863,35 +2861,33 @@ static int em28xx_hint_board(struct em28xx *dev)
if (dev->i2c_hash == em28xx_i2c_hash[i].hash) {
dev->model = em28xx_i2c_hash[i].model;
dev->tuner_type = em28xx_i2c_hash[i].tuner;
- em28xx_errdev("Your board has no unique USB ID.\n");
- em28xx_errdev("A hint were successfully done, "
- "based on i2c devicelist hash.\n");
- em28xx_errdev("This method is not 100%% failproof.\n");
- em28xx_errdev("If the board were missdetected, "
- "please email this log to:\n");
- em28xx_errdev("\tV4L Mailing List "
- " <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board detected as %s\n",
- em28xx_boards[dev->model].name);
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID.\n"
+ "A hint were successfully done, based on i2c devicelist hash.\n"
+ "This method is not 100%% failproof.\n"
+ "If the board were missdetected, please email this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board detected as %s\n",
+ em28xx_boards[dev->model].name);
return 0;
}
}
- em28xx_errdev("Your board has no unique USB ID and thus need a "
- "hint to be detected.\n");
- em28xx_errdev("You may try to use card=<n> insmod option to "
- "workaround that.\n");
- em28xx_errdev("Please send an email with this log to:\n");
- em28xx_errdev("\tV4L Mailing List <linux-media@vger.kernel.org>\n");
- em28xx_errdev("Board eeprom hash is 0x%08lx\n", dev->hash);
- em28xx_errdev("Board i2c devicelist hash is 0x%08lx\n", dev->i2c_hash);
-
- em28xx_errdev("Here is a list of valid choices for the card=<n>"
- " insmod option:\n");
+ dev_err(&dev->intf->dev,
+ "Your board has no unique USB ID and thus need a hint to be detected.\n"
+ "You may try to use card=<n> insmod option to workaround that.\n"
+ "Please send an email with this log to:\n"
+ "\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+ "Board eeprom hash is 0x%08lx\n"
+ "Board i2c devicelist hash is 0x%08lx\n",
+ dev->hash, dev->i2c_hash);
+
+ dev_err(&dev->intf->dev,
+ "Here is a list of valid choices for the card=<n> insmod option:\n");
for (i = 0; i < em28xx_bcount; i++) {
- em28xx_errdev(" card=%d -> %s\n",
- i, em28xx_boards[i].name);
+ dev_err(&dev->intf->dev,
+ " card=%d -> %s\n", i, em28xx_boards[i].name);
}
return -1;
}
@@ -2925,7 +2921,7 @@ static void em28xx_card_setup(struct em28xx *dev)
* hash identities which has not been determined as yet.
*/
if (em28xx_hint_board(dev) < 0)
- em28xx_errdev("Board not discovered\n");
+ dev_err(&dev->intf->dev, "Board not discovered\n");
else {
em28xx_set_model(dev);
em28xx_pre_card_setup(dev);
@@ -2935,8 +2931,8 @@ static void em28xx_card_setup(struct em28xx *dev)
em28xx_set_model(dev);
}
- em28xx_info("Identified as %s (card=%d)\n",
- dev->board.name, dev->model);
+ dev_info(&dev->intf->dev, "Identified as %s (card=%d)\n",
+ dev->board.name, dev->model);
dev->tuner_type = em28xx_boards[dev->model].tuner_type;
@@ -3034,12 +3030,11 @@ static void em28xx_card_setup(struct em28xx *dev)
}
if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
- em28xx_errdev("\n\n");
- em28xx_errdev("The support for this board weren't "
- "valid yet.\n");
- em28xx_errdev("Please send a report of having this working\n");
- em28xx_errdev("not to V4L mailing list (and/or to other "
- "addresses)\n\n");
+ dev_err(&dev->intf->dev,
+ "\n\n"
+ "The support for this board weren't valid yet.\n"
+ "Please send a report of having this working\n"
+ "not to V4L mailing list (and/or to other addresses)\n\n");
}
/* Free eeprom data memory */
@@ -3166,7 +3161,7 @@ static int em28xx_media_device_init(struct em28xx *dev,
else if (udev->manufacturer)
media_device_usb_init(mdev, udev, udev->manufacturer);
else
- media_device_usb_init(mdev, udev, dev->name);
+ media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev));
dev->media_dev = mdev;
#endif
@@ -3193,6 +3188,8 @@ static void em28xx_unregister_media_device(struct em28xx *dev)
*/
static void em28xx_release_resources(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+
/*FIXME: I2C IR should be disconnected */
mutex_lock(&dev->lock);
@@ -3203,7 +3200,7 @@ static void em28xx_release_resources(struct em28xx *dev)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
- usb_put_dev(dev->udev);
+ usb_put_dev(udev);
/* Mark device as unused */
clear_bit(dev->devno, em28xx_devused);
@@ -3222,7 +3219,7 @@ void em28xx_free_device(struct kref *ref)
{
struct em28xx *dev = kref_to_dev(ref);
- em28xx_info("Freeing device\n");
+ dev_info(&dev->intf->dev, "Freeing device\n");
if (!dev->disconnected)
em28xx_release_resources(dev);
@@ -3241,10 +3238,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
int minor)
{
int retval;
- static const char *default_chip_name = "em28xx";
- const char *chip_name = default_chip_name;
+ const char *chip_name = NULL;
- dev->udev = udev;
+ dev->intf = interface;
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
@@ -3282,9 +3278,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
break;
case CHIP_ID_EM2820:
chip_name = "em2710/2820";
- if (le16_to_cpu(dev->udev->descriptor.idVendor)
- == 0xeb1a) {
- __le16 idProd = dev->udev->descriptor.idProduct;
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0xeb1a) {
+ __le16 idProd = udev->descriptor.idProduct;
if (le16_to_cpu(idProd) == 0x2710)
chip_name = "em2710";
@@ -3327,21 +3322,13 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->wait_after_write = 0;
dev->eeprom_addrwidth_16bit = 1;
break;
- default:
- printk(KERN_INFO DRIVER_NAME
- ": unknown em28xx chip ID (%d)\n", dev->chip_id);
}
}
-
- if (chip_name != default_chip_name)
- printk(KERN_INFO DRIVER_NAME
- ": chip ID is %s\n", chip_name);
-
- /*
- * For em2820/em2710, the name may change latter, after checking
- * if the device has a sensor (so, it is em2710) or not.
- */
- snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+ if (!chip_name)
+ dev_info(&dev->intf->dev,
+ "unknown em28xx chip ID (%d)\n", dev->chip_id);
+ else
+ dev_info(&dev->intf->dev, "chip ID is %s\n", chip_name);
em28xx_media_device_init(dev, udev);
@@ -3360,9 +3347,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
/* Resets I2C speed */
retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_write_reg failed!"
- " retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
}
@@ -3375,8 +3362,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
else
retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2c_register bus 0 - error [%d]!\n",
+ __func__, retval);
return retval;
}
@@ -3389,8 +3377,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
retval = em28xx_i2c_register(dev, 1,
EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
- em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2c_register bus 1 - error [%d]!\n",
+ __func__, retval);
em28xx_i2c_unregister(dev, 0);
@@ -3429,7 +3418,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
if (nr >= EM28XX_MAXBOARDS) {
/* No free device slots */
- printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
+ dev_err(&interface->dev,
+ "Driver supports up to %i em28xx boards.\n",
EM28XX_MAXBOARDS);
retval = -ENOMEM;
goto err_no_slot;
@@ -3438,8 +3428,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Don't register audio interfaces */
if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
- em28xx_err(DRIVER_NAME " audio device (%04x:%04x): "
- "interface %i, class %i\n",
+ dev_err(&interface->dev,
+ "audio device (%04x:%04x): interface %i, class %i\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
ifnum,
@@ -3452,7 +3442,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- em28xx_err(DRIVER_NAME ": out of memory!\n");
retval = -ENOMEM;
goto err;
}
@@ -3462,7 +3451,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
kmalloc(sizeof(dev->alt_max_pkt_size_isoc[0]) *
interface->num_altsetting, GFP_KERNEL);
if (dev->alt_max_pkt_size_isoc == NULL) {
- em28xx_errdev("out of memory!\n");
kfree(dev);
retval = -ENOMEM;
goto err;
@@ -3501,8 +3489,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (usb_endpoint_xfer_isoc(e)) {
has_vendor_audio = true;
} else {
- printk(KERN_INFO DRIVER_NAME
- ": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
+ dev_err(&interface->dev,
+ "error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
}
break;
case 0x84:
@@ -3575,9 +3563,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
speed = "unknown";
}
- printk(KERN_INFO DRIVER_NAME
- ": New device %s %s @ %s Mbps "
- "(%04x:%04x, interface %d, class %d)\n",
+ dev_err(&interface->dev,
+ "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n",
udev->manufacturer ? udev->manufacturer : "",
udev->product ? udev->product : "",
speed,
@@ -3592,9 +3579,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
* not enough even for most Digital TV streams.
*/
if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
- printk(DRIVER_NAME ": Device initialization failed.\n");
- printk(DRIVER_NAME ": Device must be connected to a high-speed"
- " USB 2.0 port.\n");
+ dev_err(&interface->dev, "Device initialization failed.\n");
+ dev_err(&interface->dev,
+ "Device must be connected to a high-speed USB 2.0 port.\n");
retval = -ENODEV;
goto err_free;
}
@@ -3607,8 +3594,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->ifnum = ifnum;
if (has_vendor_audio) {
- printk(KERN_INFO DRIVER_NAME ": Audio interface %i found %s\n",
- ifnum, "(Vendor Class)");
+ dev_err(&interface->dev,
+ "Audio interface %i found (Vendor Class)\n", ifnum);
dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR;
}
/* Checks if audio is provided by a USB Audio Class interface */
@@ -3617,25 +3604,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
if (has_vendor_audio)
- em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
- "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
+ dev_err(&interface->dev,
+ "em28xx: device seems to have vendor AND usb audio class interfaces !\n"
+ "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS;
break;
}
}
if (has_video)
- printk(KERN_INFO DRIVER_NAME
- ": Video interface %i found:%s%s\n",
- ifnum,
- dev->analog_ep_bulk ? " bulk" : "",
- dev->analog_ep_isoc ? " isoc" : "");
+ dev_err(&interface->dev, "Video interface %i found:%s%s\n",
+ ifnum,
+ dev->analog_ep_bulk ? " bulk" : "",
+ dev->analog_ep_isoc ? " isoc" : "");
if (has_dvb)
- printk(KERN_INFO DRIVER_NAME
- ": DVB interface %i found:%s%s\n",
- ifnum,
- dev->dvb_ep_bulk ? " bulk" : "",
- dev->dvb_ep_isoc ? " isoc" : "");
+ dev_err(&interface->dev, "DVB interface %i found:%s%s\n",
+ ifnum,
+ dev->dvb_ep_bulk ? " bulk" : "",
+ dev->dvb_ep_isoc ? " isoc" : "");
dev->num_alt = interface->num_altsetting;
@@ -3664,8 +3650,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Disable V4L2 if the device doesn't have a decoder */
if (has_video &&
dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
- printk(DRIVER_NAME
- ": Currently, V4L2 is not supported on this model\n");
+ dev_err(&interface->dev,
+ "Currently, V4L2 is not supported on this model\n");
has_video = false;
dev->has_video = false;
}
@@ -3674,14 +3660,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
dev->analog_xfer_bulk = 1;
- em28xx_info("analog set to %s mode.\n",
- dev->analog_xfer_bulk ? "bulk" : "isoc");
+ dev_err(&interface->dev, "analog set to %s mode.\n",
+ dev->analog_xfer_bulk ? "bulk" : "isoc");
}
if (has_dvb) {
if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
dev->dvb_xfer_bulk = 1;
- em28xx_info("dvb set to %s mode.\n",
- dev->dvb_xfer_bulk ? "bulk" : "isoc");
+ dev_err(&interface->dev, "dvb set to %s mode.\n",
+ dev->dvb_xfer_bulk ? "bulk" : "isoc");
}
kref_init(&dev->ref);
@@ -3728,7 +3714,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->disconnected = 1;
- em28xx_info("Disconnecting %s\n", dev->name);
+ dev_err(&dev->intf->dev, "Disconnecting\n");
flush_request_modules(dev);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index eebd5d7088d0..19ccff41c7eb 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -22,6 +22,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
@@ -32,8 +34,6 @@
#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
-#include "em28xx.h"
-
#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
"Markus Rechberger <mrechberger@gmail.com>, " \
"Mauro Carvalho Chehab <mchehab@infradead.org>, " \
@@ -48,27 +48,31 @@ MODULE_VERSION(EM28XX_VERSION);
static unsigned int core_debug;
module_param(core_debug, int, 0644);
-MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
+MODULE_PARM_DESC(core_debug, "enable debug messages [core and isoc]");
-#define em28xx_coredbg(fmt, arg...) do {\
- if (core_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_coredbg(fmt, arg...) do { \
+ if (core_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "core: %s: " fmt, __func__, ## arg); \
+} while (0)
static unsigned int reg_debug;
module_param(reg_debug, int, 0644);
MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
-#define em28xx_regdbg(fmt, arg...) do {\
- if (reg_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
-/* FIXME */
-#define em28xx_isocdbg(fmt, arg...) do {\
- if (core_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_regdbg(fmt, arg...) do { \
+ if (reg_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "reg: %s: " fmt, __func__, ## arg); \
+} while (0)
+
+/* FIXME: don't abuse core_debug */
+#define em28xx_isocdbg(fmt, arg...) do { \
+ if (core_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "core: %s: " fmt, __func__, ## arg); \
+} while (0)
/*
* em28xx_read_reg_req()
@@ -78,7 +82,8 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
char *buf, int len)
{
int ret;
- int pipe = usb_rcvctrlpipe(dev->udev, 0);
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+ int pipe = usb_rcvctrlpipe(udev, 0);
if (dev->disconnected)
return -ENODEV;
@@ -86,23 +91,22 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
if (len > URB_MAX_CTRL_SIZE)
return -EINVAL;
- if (reg_debug) {
- printk(KERN_DEBUG "(pipe 0x%08x): "
- "IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
- pipe,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
- }
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8);
mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = usb_control_msg(udev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, reg, dev->urb_buf, len, HZ);
if (ret < 0) {
- if (reg_debug)
- printk(" failed!\n");
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed\n",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8);
mutex_unlock(&dev->ctrl_urb_lock);
return usb_translate_errors(ret);
}
@@ -112,14 +116,11 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
mutex_unlock(&dev->ctrl_urb_lock);
- if (reg_debug) {
- int byte;
-
- printk("<<<");
- for (byte = 0; byte < len; byte++)
- printk(" %02x", (unsigned char)buf[byte]);
- printk("\n");
- }
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed <<< %*ph\n",
+ pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf);
return ret;
}
@@ -154,7 +155,8 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int len)
{
int ret;
- int pipe = usb_sndctrlpipe(dev->udev, 0);
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
+ int pipe = usb_sndctrlpipe(udev, 0);
if (dev->disconnected)
return -ENODEV;
@@ -162,25 +164,16 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
return -EINVAL;
- if (reg_debug) {
- int byte;
-
- printk(KERN_DEBUG "(pipe 0x%08x): "
- "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
- pipe,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
-
- for (byte = 0; byte < len; byte++)
- printk(" %02x", (unsigned char)buf[byte]);
- printk("\n");
- }
+ em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph\n",
+ pipe,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf);
mutex_lock(&dev->ctrl_urb_lock);
memcpy(dev->urb_buf, buf, len);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = usb_control_msg(udev, pipe, req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, reg, dev->urb_buf, len, HZ);
mutex_unlock(&dev->ctrl_urb_lock);
@@ -267,7 +260,8 @@ static int em28xx_is_ac97_ready(struct em28xx *dev)
msleep(5);
}
- em28xx_warn("AC97 command still being executed: not handled properly!\n");
+ dev_warn(&dev->intf->dev,
+ "AC97 command still being executed: not handled properly!\n");
return -EBUSY;
}
@@ -360,8 +354,9 @@ static int set_ac97_input(struct em28xx *dev)
ret = em28xx_write_ac97(dev, inputs[i].reg, 0x8000);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- inputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ inputs[i].reg);
}
return 0;
}
@@ -444,8 +439,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
for (i = 0; i < ARRAY_SIZE(outputs); i++) {
ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ outputs[i].reg);
}
}
@@ -482,8 +478,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
ret = em28xx_write_ac97(dev, outputs[i].reg,
vol);
if (ret < 0)
- em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ dev_warn(&dev->intf->dev,
+ "couldn't setup AC97 register %d\n",
+ outputs[i].reg);
}
if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) {
@@ -519,7 +516,7 @@ int em28xx_audio_setup(struct em28xx *dev)
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
- em28xx_info("Config register raw data: 0x%02x\n", cfg);
+ dev_info(&dev->intf->dev, "Config register raw data: 0x%02x\n", cfg);
if (cfg < 0) { /* Register read error */
/* Be conservative */
dev->int_audio_type = EM28XX_INT_AUDIO_AC97;
@@ -540,8 +537,8 @@ int em28xx_audio_setup(struct em28xx *dev)
i2s_samplerates = 5;
else
i2s_samplerates = 3;
- em28xx_info("I2S Audio (%d sample rate(s))\n",
- i2s_samplerates);
+ dev_info(&dev->intf->dev, "I2S Audio (%d sample rate(s))\n",
+ i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
@@ -558,7 +555,8 @@ int em28xx_audio_setup(struct em28xx *dev)
* Note: (some) em2800 devices without eeprom reports 0x91 on
* CHIPCFG register, even not having an AC97 chip
*/
- em28xx_warn("AC97 chip type couldn't be determined\n");
+ dev_warn(&dev->intf->dev,
+ "AC97 chip type couldn't be determined\n");
dev->audio_mode.ac97 = EM28XX_NO_AC97;
if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR)
dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
@@ -571,13 +569,13 @@ int em28xx_audio_setup(struct em28xx *dev)
goto init_audio;
vid = vid1 << 16 | vid2;
- em28xx_warn("AC97 vendor ID = 0x%08x\n", vid);
+ dev_warn(&dev->intf->dev, "AC97 vendor ID = 0x%08x\n", vid);
feat = em28xx_read_ac97(dev, AC97_RESET);
if (feat < 0)
goto init_audio;
- em28xx_warn("AC97 features = 0x%04x\n", feat);
+ dev_warn(&dev->intf->dev, "AC97 features = 0x%04x\n", feat);
/* Try to identify what audio processor we have */
if (((vid == 0xffffffff) || (vid == 0x83847650)) && (feat == 0x6a90))
@@ -589,17 +587,20 @@ init_audio:
/* Reports detected AC97 processor */
switch (dev->audio_mode.ac97) {
case EM28XX_NO_AC97:
- em28xx_info("No AC97 audio processor\n");
+ dev_info(&dev->intf->dev, "No AC97 audio processor\n");
break;
case EM28XX_AC97_EM202:
- em28xx_info("Empia 202 AC97 audio processor detected\n");
+ dev_info(&dev->intf->dev,
+ "Empia 202 AC97 audio processor detected\n");
break;
case EM28XX_AC97_SIGMATEL:
- em28xx_info("Sigmatel audio processor detected (stac 97%02x)\n",
- vid & 0xff);
+ dev_info(&dev->intf->dev,
+ "Sigmatel audio processor detected (stac 97%02x)\n",
+ vid & 0xff);
break;
case EM28XX_AC97_OTHER:
- em28xx_warn("Unknown AC97 audio processor detected!\n");
+ dev_warn(&dev->intf->dev,
+ "Unknown AC97 audio processor detected!\n");
break;
default:
break;
@@ -798,6 +799,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
{
struct urb *urb;
struct em28xx_usb_bufs *usb_bufs;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
em28xx_isocdbg("em28xx: called em28xx_uninit_usb_xfer in mode %d\n",
@@ -817,7 +819,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
usb_unlink_urb(urb);
if (usb_bufs->transfer_buffer[i]) {
- usb_free_coherent(dev->udev,
+ usb_free_coherent(udev,
urb->transfer_buffer_length,
usb_bufs->transfer_buffer[i],
urb->transfer_dma);
@@ -871,9 +873,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
int num_bufs, int max_pkt_size, int packet_multiplier)
{
struct em28xx_usb_bufs *usb_bufs;
+ struct urb *urb;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
int sb_size, pipe;
- struct urb *urb;
int j, k;
em28xx_isocdbg("em28xx: called em28xx_alloc_isoc in mode %d\n", mode);
@@ -883,21 +886,23 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
if (mode == EM28XX_DIGITAL_MODE) {
if ((xfer_bulk && !dev->dvb_ep_bulk) ||
(!xfer_bulk && !dev->dvb_ep_isoc)) {
- em28xx_errdev("no endpoint for DVB mode and transfer type %d\n",
- xfer_bulk > 0);
+ dev_err(&dev->intf->dev,
+ "no endpoint for DVB mode and transfer type %d\n",
+ xfer_bulk > 0);
return -EINVAL;
}
usb_bufs = &dev->usb_ctl.digital_bufs;
} else if (mode == EM28XX_ANALOG_MODE) {
if ((xfer_bulk && !dev->analog_ep_bulk) ||
(!xfer_bulk && !dev->analog_ep_isoc)) {
- em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
- xfer_bulk > 0);
+ dev_err(&dev->intf->dev,
+ "no endpoint for analog mode and transfer type %d\n",
+ xfer_bulk > 0);
return -EINVAL;
}
usb_bufs = &dev->usb_ctl.analog_bufs;
} else {
- em28xx_errdev("invalid mode selected\n");
+ dev_err(&dev->intf->dev, "invalid mode selected\n");
return -EINVAL;
}
@@ -907,15 +912,12 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
usb_bufs->num_bufs = num_bufs;
usb_bufs->urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!usb_bufs->urb) {
- em28xx_errdev("cannot alloc memory for usb buffers\n");
+ if (!usb_bufs->urb)
return -ENOMEM;
- }
usb_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!usb_bufs->transfer_buffer) {
- em28xx_errdev("cannot allocate memory for usb transfer\n");
kfree(usb_bufs->urb);
return -ENOMEM;
}
@@ -939,33 +941,33 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
}
usb_bufs->urb[i] = urb;
- usb_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ usb_bufs->transfer_buffer[i] = usb_alloc_coherent(udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!usb_bufs->transfer_buffer[i]) {
- em28xx_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
+ dev_err(&dev->intf->dev,
+ "unable to allocate %i bytes for transfer buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
memset(usb_bufs->transfer_buffer[i], 0, sb_size);
if (xfer_bulk) { /* bulk */
- pipe = usb_rcvbulkpipe(dev->udev,
+ pipe = usb_rcvbulkpipe(udev,
mode == EM28XX_ANALOG_MODE ?
dev->analog_ep_bulk :
dev->dvb_ep_bulk);
- usb_fill_bulk_urb(urb, dev->udev, pipe,
+ usb_fill_bulk_urb(urb, udev, pipe,
usb_bufs->transfer_buffer[i], sb_size,
em28xx_irq_callback, dev);
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
} else { /* isoc */
- pipe = usb_rcvisocpipe(dev->udev,
+ pipe = usb_rcvisocpipe(udev,
mode == EM28XX_ANALOG_MODE ?
dev->analog_ep_isoc :
dev->dvb_ep_isoc);
- usb_fill_int_urb(urb, dev->udev, pipe,
+ usb_fill_int_urb(urb, udev, pipe,
usb_bufs->transfer_buffer[i], sb_size,
em28xx_irq_callback, dev, 1);
urb->transfer_flags = URB_ISO_ASAP |
@@ -997,6 +999,7 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
struct em28xx_dmaqueue *dma_q = &dev->vidq;
struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
struct em28xx_usb_bufs *usb_bufs;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int i;
int rc;
int alloc;
@@ -1023,10 +1026,11 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
}
if (xfer_bulk) {
- rc = usb_clear_halt(dev->udev, usb_bufs->urb[0]->pipe);
+ rc = usb_clear_halt(udev, usb_bufs->urb[0]->pipe);
if (rc < 0) {
- em28xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
- rc);
+ dev_err(&dev->intf->dev,
+ "failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+ rc);
em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
@@ -1041,8 +1045,8 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
for (i = 0; i < usb_bufs->num_bufs; i++) {
rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
if (rc) {
- em28xx_err("submit of urb %i failed (error=%i)\n", i,
- rc);
+ dev_err(&dev->intf->dev,
+ "submit of urb %i failed (error=%i)\n", i, rc);
em28xx_uninit_usb_xfer(dev, mode);
return rc;
}
@@ -1075,7 +1079,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
ops->init(dev);
}
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
+ pr_info("em28xx: Registered (%s) extension\n", ops->name);
return 0;
}
EXPORT_SYMBOL(em28xx_register_extension);
@@ -1090,7 +1094,7 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
}
list_del(&ops->next);
mutex_unlock(&em28xx_devlist_mutex);
- printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
+ pr_info("em28xx: Removed (%s) extension\n", ops->name);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
@@ -1124,7 +1128,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Suspending extensions\n");
+ dev_info(&dev->intf->dev, "Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->suspend)
@@ -1138,7 +1142,7 @@ int em28xx_resume_extension(struct em28xx *dev)
{
const struct em28xx_ops *ops = NULL;
- em28xx_info("Resuming extensions\n");
+ dev_info(&dev->intf->dev, "Resuming extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->resume)
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 8cedef0daae4..75a75dab2e8e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -21,11 +21,12 @@
the Free Software Foundation; either version 2 of the License.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include "em28xx.h"
#include <media/v4l2-common.h>
#include <dvb_demux.h>
#include <dvb_net.h>
@@ -72,9 +73,10 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(level, fmt, arg...) do { \
-if (debug >= level) \
- printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "dvb: " fmt, ## arg); \
} while (0)
struct em28xx_dvb {
@@ -196,6 +198,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
int rc;
struct em28xx_i2c_bus *i2c_bus = dvb->adapter.priv;
struct em28xx *dev = i2c_bus->dev;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int dvb_max_packet_size, packet_multiplier, dvb_alt;
if (dev->dvb_xfer_bulk) {
@@ -214,7 +217,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
+ usb_set_interface(udev, dev->ifnum, dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
@@ -734,13 +737,13 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
ret = gpio_request_one(dvb->lna_gpio, flags, NULL);
if (ret)
- em28xx_errdev("gpio request failed %d\n", ret);
+ dev_err(&dev->intf->dev, "gpio request failed %d\n", ret);
else
gpio_free(dvb->lna_gpio);
return ret;
#else
- dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
+ dev_warn(&dev->intf->dev, "%s: LNA control is disabled (lna=%u)\n",
KBUILD_MODNAME, c->lna);
return 0;
#endif
@@ -934,20 +937,20 @@ static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
cfg.ctrl = &ctl;
if (!dev->dvb->fe[0]) {
- em28xx_errdev("/2: dvb frontend not attached. "
- "Can't attach xc3028\n");
+ dev_err(&dev->intf->dev,
+ "dvb frontend not attached. Can't attach xc3028\n");
return -EINVAL;
}
fe = dvb_attach(xc2028_attach, dev->dvb->fe[0], &cfg);
if (!fe) {
- em28xx_errdev("/2: xc3028 attach failed\n");
+ dev_err(&dev->intf->dev, "xc3028 attach failed\n");
dvb_frontend_detach(dev->dvb->fe[0]);
dev->dvb->fe[0] = NULL;
return -EINVAL;
}
- em28xx_info("%s/2: xc3028 attached\n", dev->name);
+ dev_info(&dev->intf->dev, "xc3028 attached\n");
return 0;
}
@@ -963,11 +966,13 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
mutex_init(&dvb->lock);
/* register adapter */
- result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
- adapter_nr);
+ result = dvb_register_adapter(&dvb->adapter,
+ dev_name(&dev->intf->dev), module,
+ device, adapter_nr);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_register_adapter failed (errno = %d)\n",
+ result);
goto fail_adapter;
}
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
@@ -984,8 +989,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->fe[0]);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_register_frontend failed (errno = %d)\n",
+ result);
goto fail_frontend0;
}
@@ -993,8 +999,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
if (dvb->fe[1]) {
result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
if (result < 0) {
- printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "2nd dvb_register_frontend failed (errno = %d)\n",
+ result);
goto fail_frontend1;
}
}
@@ -1011,8 +1018,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_dmx_init failed (errno = %d)\n",
+ result);
goto fail_dmx;
}
@@ -1021,31 +1029,35 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
dvb->dmxdev.capabilities = 0;
result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "dvb_dmxdev_init failed (errno = %d)\n",
+ result);
goto fail_dmxdev;
}
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ result);
goto fail_fe_mem;
}
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(&dev->intf->dev,
+ "connect_frontend failed (errno = %d)\n",
+ result);
goto fail_fe_conn;
}
@@ -1117,13 +1129,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Binding DVB extension\n");
+ dev_info(&dev->intf->dev, "Binding DVB extension\n");
dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
- if (dvb == NULL) {
- em28xx_info("em28xx_dvb: memory allocation failed\n");
+ if (!dvb)
return -ENOMEM;
- }
+
dev->dvb = dvb;
dvb->fe[0] = dvb->fe[1] = NULL;
@@ -1142,7 +1153,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
EM28XX_DVB_NUM_ISOC_PACKETS);
}
if (result) {
- em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+ dev_err(&dev->intf->dev,
+ "failed to pre-allocate USB transfer buffers for DVB.\n");
kfree(dvb);
dev->dvb = NULL;
return result;
@@ -1259,7 +1271,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
dvb->fe[0] = dvb_attach(drxd_attach, &em28xx_drxd, NULL,
- &dev->i2c_adap[dev->def_i2c_bus], &dev->udev->dev);
+ &dev->i2c_adap[dev->def_i2c_bus],
+ &dev->intf->dev);
if (em28xx_attach_xc3028(0x61, dev) < 0) {
result = -EINVAL;
goto out_free;
@@ -1321,8 +1334,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
result = gpio_request_one(dvb->lna_gpio,
GPIOF_OUT_INIT_LOW, NULL);
if (result)
- em28xx_errdev("gpio request failed %d\n",
- result);
+ dev_err(&dev->intf->dev,
+ "gpio request failed %d\n",
+ result);
else
gpio_free(dvb->lna_gpio);
@@ -1937,12 +1951,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
break;
default:
- em28xx_errdev("/2: The frontend of your DVB/ATSC card"
- " isn't supported yet\n");
+ dev_err(&dev->intf->dev,
+ "The frontend of your DVB/ATSC card isn't supported yet\n");
break;
}
if (NULL == dvb->fe[0]) {
- em28xx_errdev("/2: frontend initialization failed\n");
+ dev_err(&dev->intf->dev, "frontend initialization failed\n");
result = -EINVAL;
goto out_free;
}
@@ -1952,12 +1966,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->fe[1]->callback = em28xx_tuner_callback;
/* register everything */
- result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
+ result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->intf->dev);
if (result < 0)
goto out_free;
- em28xx_info("DVB extension successfully initialized\n");
+ dev_info(&dev->intf->dev, "DVB extension successfully initialized\n");
kref_get(&dev->ref);
@@ -1997,7 +2011,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
if (!dev->dvb)
return 0;
- em28xx_info("Closing DVB extension\n");
+ dev_info(&dev->intf->dev, "Closing DVB extension\n");
dvb = dev->dvb;
@@ -2055,17 +2069,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Suspending DVB extension\n");
+ dev_info(&dev->intf->dev, "Suspending DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_suspend(dvb->fe[0]);
- em28xx_info("fe0 suspend %d\n", ret);
+ dev_info(&dev->intf->dev, "fe0 suspend %d\n", ret);
}
if (dvb->fe[1]) {
dvb_frontend_suspend(dvb->fe[1]);
- em28xx_info("fe1 suspend %d\n", ret);
+ dev_info(&dev->intf->dev, "fe1 suspend %d\n", ret);
}
}
@@ -2082,18 +2096,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
if (!dev->board.has_dvb)
return 0;
- em28xx_info("Resuming DVB extension\n");
+ dev_info(&dev->intf->dev, "Resuming DVB extension\n");
if (dev->dvb) {
struct em28xx_dvb *dvb = dev->dvb;
if (dvb->fe[0]) {
ret = dvb_frontend_resume(dvb->fe[0]);
- em28xx_info("fe0 resume %d\n", ret);
+ dev_info(&dev->intf->dev, "fe0 resume %d\n", ret);
}
if (dvb->fe[1]) {
ret = dvb_frontend_resume(dvb->fe[1]);
- em28xx_info("fe1 resume %d\n", ret);
+ dev_info(&dev->intf->dev, "fe1 resume %d\n", ret);
}
}
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 8b690ac908a4..8c472d5adb50 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -22,13 +22,14 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
-#include "em28xx.h"
#include "tuner-xc2028.h"
#include <media/v4l2-common.h>
#include <media/tuner.h>
@@ -43,6 +44,13 @@ static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
+#define dprintk(level, fmt, arg...) do { \
+ if (i2c_debug > level) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "i2c: %s: " fmt, __func__, ## arg); \
+} while (0)
+
+
/*
* em2800_i2c_send_bytes()
* send up to 4 bytes to the em2800 i2c device
@@ -70,7 +78,8 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
/* trigger write */
ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
if (ret != 2 + len) {
- em28xx_warn("failed to trigger write to i2c address 0x%x (error=%i)\n",
+ dev_warn(&dev->intf->dev,
+ "failed to trigger write to i2c address 0x%x (error=%i)\n",
addr, ret);
return (ret < 0) ? ret : -EIO;
}
@@ -80,20 +89,18 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
if (ret == 0x80 + len - 1)
return len;
if (ret == 0x94 + len - 1) {
- if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "R05 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
}
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+ dprintk(0, "write to i2c device at 0x%x timed out\n", addr);
return -ETIMEDOUT;
}
@@ -116,8 +123,9 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
buf2[0] = addr;
ret = dev->em28xx_write_regs(dev, 0x04, buf2, 2);
if (ret != 2) {
- em28xx_warn("failed to trigger read from i2c address 0x%x (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "failed to trigger read from i2c address 0x%x (error=%i)\n",
+ addr, ret);
return (ret < 0) ? ret : -EIO;
}
@@ -127,29 +135,28 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
if (ret == 0x84 + len - 1)
break;
if (ret == 0x94 + len - 1) {
- if (i2c_debug == 1)
- em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "R05 returned 0x%02x: I2C ACK error\n",
+ ret);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
}
if (ret != 0x84 + len - 1) {
- if (i2c_debug)
- em28xx_warn("read from i2c device at 0x%x timed out\n",
- addr);
+ dprintk(0, "read from i2c device at 0x%x timed out\n", addr);
}
/* get the received message */
ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
if (ret != len) {
- em28xx_warn("reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
+ addr, ret);
return (ret < 0) ? ret : -EIO;
}
for (i = 0; i < len; i++)
@@ -193,12 +200,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
ret = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
if (ret != len) {
if (ret < 0) {
- em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "writing to i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
} else {
- em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
- len, addr, ret);
+ dev_warn(&dev->intf->dev,
+ "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+ len, addr, ret);
return -EIO;
}
}
@@ -209,14 +218,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (ret == 0) /* success */
return len;
if (ret == 0x10) {
- if (i2c_debug == 1)
- em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
- addr);
+ dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+ addr);
return -ENXIO;
}
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
msleep(5);
@@ -229,14 +238,15 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (ret == 0x02 || ret == 0x04) {
/* NOTE: these errors seem to be related to clock stretching */
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
- addr, ret);
+ dprintk(0,
+ "write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
return -ETIMEDOUT;
}
- em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
return -EIO;
}
@@ -258,8 +268,9 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
/* Read data from i2c device */
ret = dev->em28xx_read_reg_req_len(dev, 2, addr, buf, len);
if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
}
/*
@@ -276,27 +287,28 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
if (ret == 0) /* success */
return len;
if (ret < 0) {
- em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
- ret);
+ dev_warn(&dev->intf->dev,
+ "failed to get i2c transfer status from bridge register (error=%i)\n",
+ ret);
return ret;
}
if (ret == 0x10) {
- if (i2c_debug == 1)
- em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
- addr);
+ dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+ addr);
return -ENXIO;
}
if (ret == 0x02 || ret == 0x04) {
/* NOTE: these errors seem to be related to clock stretching */
- if (i2c_debug)
- em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
- addr, ret);
+ dprintk(0,
+ "write to i2c device at 0x%x timed out (status=%i)\n",
+ addr, ret);
return -ETIMEDOUT;
}
- em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+ addr, ret);
return -EIO;
}
@@ -335,12 +347,14 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
ret = dev->em28xx_write_regs_req(dev, 0x06, addr, buf, len);
if (ret != len) {
if (ret < 0) {
- em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "writing to i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
} else {
- em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
- len, addr, ret);
+ dev_warn(&dev->intf->dev,
+ "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+ len, addr, ret);
return -EIO;
}
}
@@ -353,9 +367,7 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (!ret)
return len;
else if (ret > 0) {
- if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
@@ -386,8 +398,9 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
/* Read value */
ret = dev->em28xx_read_reg_req_len(dev, 0x06, addr, buf, len);
if (ret < 0) {
- em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
- addr, ret);
+ dev_warn(&dev->intf->dev,
+ "reading from i2c device at 0x%x failed (error=%i)\n",
+ addr, ret);
return ret;
}
/*
@@ -408,9 +421,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
if (!ret)
return len;
else if (ret > 0) {
- if (i2c_debug == 1)
- em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
- ret);
+ dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
return -ENXIO;
}
@@ -528,57 +539,46 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
- if (i2c_debug > 1)
- printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
- dev->name, __func__ ,
- (msgs[i].flags & I2C_M_RD) ? "read" : "write",
- i == num - 1 ? "stop" : "nonstop",
- addr, msgs[i].len);
if (!msgs[i].len) {
/*
* no len: check only for device presence
* This code is only called during device probe.
*/
rc = i2c_check_for_device(i2c_bus, addr);
- if (rc < 0) {
- if (rc == -ENXIO) {
- if (i2c_debug > 1)
- printk(KERN_CONT " no device\n");
- rc = -ENODEV;
- } else {
- if (i2c_debug > 1)
- printk(KERN_CONT " ERROR: %i\n", rc);
- }
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return rc;
- }
+
+ if (rc == -ENXIO)
+ rc = -ENODEV;
} else if (msgs[i].flags & I2C_M_RD) {
/* read bytes */
rc = i2c_recv_bytes(i2c_bus, msgs[i]);
-
- if (i2c_debug > 1 && rc >= 0)
- printk(KERN_CONT " %*ph",
- msgs[i].len, msgs[i].buf);
} else {
- if (i2c_debug > 1)
- printk(KERN_CONT " %*ph",
- msgs[i].len, msgs[i].buf);
-
/* write bytes */
rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
}
- if (rc < 0) {
- if (i2c_debug > 1)
- printk(KERN_CONT " ERROR: %i\n", rc);
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return rc;
- }
- if (i2c_debug > 1)
- printk(KERN_CONT "\n");
+
+ if (rc < 0)
+ goto error;
+
+ dprintk(2, "%s %s addr=%02x len=%d: %*ph\n",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop",
+ addr, msgs[i].len,
+ msgs[i].len, msgs[i].buf);
}
rt_mutex_unlock(&dev->i2c_bus_lock);
return num;
+
+error:
+ dprintk(2, "%s %s addr=%02x len=%d: %sERROR: %i\n",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop",
+ addr, msgs[i].len,
+ (rc == -ENODEV) ? "no device " : "",
+ rc);
+
+ rt_mutex_unlock(&dev->i2c_bus_lock);
+ return rc;
}
/*
@@ -672,7 +672,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
/* Check if board has eeprom */
err = i2c_master_recv(&dev->i2c_client[bus], &buf, 0);
if (err < 0) {
- em28xx_info("board has no eeprom\n");
+ dev_info(&dev->intf->dev, "board has no eeprom\n");
return -ENODEV;
}
@@ -685,17 +685,19 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
dev->eeprom_addrwidth_16bit,
len, data);
if (err != len) {
- em28xx_errdev("failed to read eeprom (err=%d)\n", err);
+ dev_err(&dev->intf->dev,
+ "failed to read eeprom (err=%d)\n", err);
goto error;
}
if (i2c_debug) {
/* Display eeprom content */
- print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+ print_hex_dump(KERN_DEBUG, "em28xx eeprom ", DUMP_PREFIX_OFFSET,
16, 1, data, len, true);
if (dev->eeprom_addrwidth_16bit)
- em28xx_info("eeprom %06x: ... (skipped)\n", 256);
+ dev_info(&dev->intf->dev,
+ "eeprom %06x: ... (skipped)\n", 256);
}
if (dev->eeprom_addrwidth_16bit &&
@@ -707,11 +709,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
dev->hash = em28xx_hash_mem(data, len, 32);
mc_start = (data[1] << 8) + 4; /* usually 0x0004 */
- em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
- data[0], data[1], data[2], data[3], dev->hash);
- em28xx_info("EEPROM info:\n");
- em28xx_info("\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
- mc_start, data[2]);
+ dev_info(&dev->intf->dev,
+ "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+ data[0], data[1], data[2], data[3], dev->hash);
+ dev_info(&dev->intf->dev,
+ "EEPROM info:\n");
+ dev_info(&dev->intf->dev,
+ "\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
+ mc_start, data[2]);
/*
* boot configuration (address 0x0002):
* [0] microcode download speed: 1 = 400 kHz; 0 = 100 kHz
@@ -729,8 +734,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
err = em28xx_i2c_read_block(dev, bus, mc_start + 46, 1, 2,
data);
if (err != 2) {
- em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
- err);
+ dev_err(&dev->intf->dev,
+ "failed to read hardware configuration data from eeprom (err=%d)\n",
+ err);
goto error;
}
@@ -747,8 +753,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
err = em28xx_i2c_read_block(dev, bus, hwconf_offset, 1, len,
data);
if (err != len) {
- em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
- err);
+ dev_err(&dev->intf->dev,
+ "failed to read hardware configuration data from eeprom (err=%d)\n",
+ err);
goto error;
}
@@ -756,7 +763,8 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
/* NOTE: not all devices provide this type of dataset */
if (data[0] != 0x1a || data[1] != 0xeb ||
data[2] != 0x67 || data[3] != 0x95) {
- em28xx_info("\tno hardware configuration dataset found in eeprom\n");
+ dev_info(&dev->intf->dev,
+ "\tno hardware configuration dataset found in eeprom\n");
kfree(data);
return 0;
}
@@ -767,11 +775,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
data[0] == 0x1a && data[1] == 0xeb &&
data[2] == 0x67 && data[3] == 0x95) {
dev->hash = em28xx_hash_mem(data, len, 32);
- em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
- data[0], data[1], data[2], data[3], dev->hash);
- em28xx_info("EEPROM info:\n");
+ dev_info(&dev->intf->dev,
+ "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+ data[0], data[1], data[2], data[3], dev->hash);
+ dev_info(&dev->intf->dev,
+ "EEPROM info:\n");
} else {
- em28xx_info("unknown eeprom format or eeprom corrupted !\n");
+ dev_info(&dev->intf->dev,
+ "unknown eeprom format or eeprom corrupted !\n");
err = -ENODEV;
goto error;
}
@@ -782,50 +793,55 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
case 0:
- em28xx_info("\tNo audio on board.\n");
+ dev_info(&dev->intf->dev, "\tNo audio on board.\n");
break;
case 1:
- em28xx_info("\tAC97 audio (5 sample rates)\n");
+ dev_info(&dev->intf->dev, "\tAC97 audio (5 sample rates)\n");
break;
case 2:
if (dev->chip_id < CHIP_ID_EM2860)
- em28xx_info("\tI2S audio, sample rate=32k\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, sample rate=32k\n");
else
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 3 sample rates\n");
break;
case 3:
if (dev->chip_id < CHIP_ID_EM2860)
- em28xx_info("\tI2S audio, 3 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 3 sample rates\n");
else
- em28xx_info("\tI2S audio, 5 sample rates\n");
+ dev_info(&dev->intf->dev,
+ "\tI2S audio, 5 sample rates\n");
break;
}
if (le16_to_cpu(dev_config->chip_conf) & 1 << 3)
- em28xx_info("\tUSB Remote wakeup capable\n");
+ dev_info(&dev->intf->dev, "\tUSB Remote wakeup capable\n");
if (le16_to_cpu(dev_config->chip_conf) & 1 << 2)
- em28xx_info("\tUSB Self power capable\n");
+ dev_info(&dev->intf->dev, "\tUSB Self power capable\n");
switch (le16_to_cpu(dev_config->chip_conf) & 0x3) {
case 0:
- em28xx_info("\t500mA max power\n");
+ dev_info(&dev->intf->dev, "\t500mA max power\n");
break;
case 1:
- em28xx_info("\t400mA max power\n");
+ dev_info(&dev->intf->dev, "\t400mA max power\n");
break;
case 2:
- em28xx_info("\t300mA max power\n");
+ dev_info(&dev->intf->dev, "\t300mA max power\n");
break;
case 3:
- em28xx_info("\t200mA max power\n");
+ dev_info(&dev->intf->dev, "\t200mA max power\n");
break;
}
- em28xx_info("\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
- dev_config->string_idx_table,
- le16_to_cpu(dev_config->string1),
- le16_to_cpu(dev_config->string2),
- le16_to_cpu(dev_config->string3));
+ dev_info(&dev->intf->dev,
+ "\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
+ dev_config->string_idx_table,
+ le16_to_cpu(dev_config->string1),
+ le16_to_cpu(dev_config->string2),
+ le16_to_cpu(dev_config->string3));
return 0;
@@ -914,8 +930,9 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned bus)
if (rc < 0)
continue;
i2c_devicelist[i] = i;
- em28xx_info("found i2c device @ 0x%x on bus %d [%s]\n",
- i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
+ dev_info(&dev->intf->dev,
+ "found i2c device @ 0x%x on bus %d [%s]\n",
+ i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
}
if (bus == dev->def_i2c_bus)
@@ -939,8 +956,8 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
return -ENODEV;
dev->i2c_adap[bus] = em28xx_adap_template;
- dev->i2c_adap[bus].dev.parent = &dev->udev->dev;
- strcpy(dev->i2c_adap[bus].name, dev->name);
+ dev->i2c_adap[bus].dev.parent = &dev->intf->dev;
+ strcpy(dev->i2c_adap[bus].name, dev_name(&dev->intf->dev));
dev->i2c_bus[bus].bus = bus;
dev->i2c_bus[bus].algo_type = algo_type;
@@ -949,8 +966,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
retval = i2c_add_adapter(&dev->i2c_adap[bus]);
if (retval < 0) {
- em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: i2c_add_adapter failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
@@ -961,8 +979,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
if (!bus) {
retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len);
if ((retval < 0) && (retval != -ENODEV)) {
- em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
- __func__, retval);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_i2_eeprom failed! retval [%d]\n",
+ __func__, retval);
return retval;
}
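
The em28xx-i2c.c conversion above replaces the driver-private em28xx_warn()/em28xx_info() wrappers with the generic device-model loggers, funnels the verbose output through one level-aware macro, and gives em28xx_i2c_xfer() a single unlock-and-return error path. The following is a minimal sketch of that pattern under invented names (my_dev, send_one, do_transfer are not part of em28xx):

/* Minimal sketch of the logging and error-path pattern used above. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb.h>

static int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "debug level (0-2)");

struct my_dev {                         /* stand-in for struct em28xx */
        struct usb_interface *intf;
        struct mutex lock;
};

/* Emit only when the module parameter exceeds the message level; dev_printk()
 * prefixes the output with the driver and interface name automatically. */
#define dprintk(dev, level, fmt, arg...) do {                           \
        if (i2c_debug > (level))                                        \
                dev_printk(KERN_DEBUG, &(dev)->intf->dev,               \
                           "i2c: %s: " fmt, __func__, ## arg);          \
} while (0)

static int send_one(struct my_dev *dev, int i)
{
        dprintk(dev, 1, "sending message %d\n", i);
        return (i & 1) ? -EIO : 0;      /* placeholder per-message transfer */
}

static int do_transfer(struct my_dev *dev, int num)
{
        int i, rc = 0;

        mutex_lock(&dev->lock);
        for (i = 0; i < num; i++) {
                rc = send_one(dev, i);
                if (rc < 0)
                        goto error;     /* single unlock point for all failures */
                dprintk(dev, 2, "message %d ok\n", i);
        }
        mutex_unlock(&dev->lock);
        return num;

error:
        dprintk(dev, 2, "message %d ERROR: %i\n", i, rc);
        mutex_unlock(&dev->lock);
        return rc;
}

Unlocking through one label mirrors what em28xx_i2c_xfer() does above once the per-branch printk/unlock/return sequences are removed.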
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 4007356d991d..782ce095c8c5 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -21,6 +21,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "em28xx.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -29,8 +31,6 @@
#include <linux/slab.h>
#include <linux/bitrev.h>
-#include "em28xx.h"
-
#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
#define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL 500 /* [ms] */
#define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL 100 /* [ms] */
@@ -41,10 +41,11 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
#define MODULE_NAME "em28xx"
-#define dprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
+#define dprintk(fmt, arg...) do { \
+ if (ir_debug) \
+ dev_printk(KERN_DEBUG, &ir->dev->intf->dev, \
+ "input: %s: " fmt, __func__, ## arg); \
+} while (0)
/**********************************************************
Polling structure used by em28xx IR's
@@ -458,8 +459,9 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
case CHIP_ID_EM28178:
return em2874_ir_change_protocol(rc_dev, rc_type);
default:
- printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
- dev->chip_id);
+ dev_err(&ir->dev->intf->dev,
+ "Unrecognized em28xx chip id 0x%02x: IR not supported\n",
+ dev->chip_id);
return -EINVAL;
}
}
@@ -564,15 +566,16 @@ static void em28xx_query_buttons(struct work_struct *work)
static int em28xx_register_snapshot_button(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct input_dev *input_dev;
int err;
- em28xx_info("Registering snapshot button...\n");
+ dev_info(&dev->intf->dev, "Registering snapshot button...\n");
input_dev = input_allocate_device();
if (!input_dev)
return -ENOMEM;
- usb_make_path(dev->udev, dev->snapshot_button_path,
+ usb_make_path(udev, dev->snapshot_button_path,
sizeof(dev->snapshot_button_path));
strlcat(dev->snapshot_button_path, "/sbutton",
sizeof(dev->snapshot_button_path));
@@ -584,14 +587,14 @@ static int em28xx_register_snapshot_button(struct em28xx *dev)
input_dev->keycodesize = 0;
input_dev->keycodemax = 0;
input_dev->id.bustype = BUS_USB;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
+ input_dev->id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+ input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
input_dev->id.version = 1;
- input_dev->dev.parent = &dev->udev->dev;
+ input_dev->dev.parent = &dev->intf->dev;
err = input_register_device(input_dev);
if (err) {
- em28xx_errdev("input_register_device failed\n");
+ dev_err(&dev->intf->dev, "input_register_device failed\n");
input_free_device(input_dev);
return err;
}
@@ -631,7 +634,8 @@ static void em28xx_init_buttons(struct em28xx *dev)
} else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
/* Check sanity */
if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
- em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+ dev_err(&dev->intf->dev,
+ "BUG: illumination button defined, but no illumination LED.\n");
goto next_button;
}
}
@@ -667,7 +671,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
dev->num_button_polling_addresses = 0;
/* Deregister input devices */
if (dev->sbutton_input_dev != NULL) {
- em28xx_info("Deregistering snapshot button\n");
+ dev_info(&dev->intf->dev, "Deregistering snapshot button\n");
input_unregister_device(dev->sbutton_input_dev);
dev->sbutton_input_dev = NULL;
}
@@ -675,6 +679,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
static int em28xx_ir_init(struct em28xx *dev)
{
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
struct em28xx_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
@@ -696,19 +701,20 @@ static int em28xx_ir_init(struct em28xx *dev)
i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
if (!i2c_rc_dev_addr) {
dev->board.has_ir_i2c = 0;
- em28xx_warn("No i2c IR remote control device found.\n");
+ dev_warn(&dev->intf->dev,
+ "No i2c IR remote control device found.\n");
return -ENODEV;
}
}
if (dev->board.ir_codes == NULL && !dev->board.has_ir_i2c) {
/* No remote control support */
- em28xx_warn("Remote control support is not available for "
- "this card.\n");
+ dev_warn(&dev->intf->dev,
+ "Remote control support is not available for this card.\n");
return 0;
}
- em28xx_info("Registering input extension\n");
+ dev_info(&dev->intf->dev, "Registering input extension\n");
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
if (!ir)
@@ -792,18 +798,19 @@ static int em28xx_ir_init(struct em28xx *dev)
ir->polling = 100; /* ms */
/* init input device */
- snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);
+ snprintf(ir->name, sizeof(ir->name), "%s IR",
+ dev_name(&dev->intf->dev));
- usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
+ usb_make_path(udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
rc->input_name = ir->name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_USB;
rc->input_id.version = 1;
- rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
- rc->dev.parent = &dev->udev->dev;
+ rc->input_id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+ rc->input_id.product = le16_to_cpu(udev->descriptor.idProduct);
+ rc->dev.parent = &dev->intf->dev;
rc->driver_name = MODULE_NAME;
/* all done */
@@ -811,7 +818,7 @@ static int em28xx_ir_init(struct em28xx *dev)
if (err)
goto error;
- em28xx_info("Input extension successfully initalized\n");
+ dev_info(&dev->intf->dev, "Input extension successfully initialized\n");
return 0;
@@ -832,7 +839,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
return 0;
}
- em28xx_info("Closing input extension\n");
+ dev_info(&dev->intf->dev, "Closing input extension\n");
em28xx_shutdown_buttons(dev);
@@ -861,7 +868,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Suspending input extension\n");
+ dev_info(&dev->intf->dev, "Suspending input extension\n");
if (ir)
cancel_delayed_work_sync(&ir->work);
cancel_delayed_work_sync(&dev->buttons_query_work);
@@ -878,7 +885,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
if (dev->is_audio_only)
return 0;
- em28xx_info("Resuming input extension\n");
+ dev_info(&dev->intf->dev, "Resuming input extension\n");
/* if suspend calls ir_raw_event_unregister(), then resume should call
ir_raw_event_register() */
if (ir)
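
Several em28xx-input.c hunks above stop dereferencing a cached struct usb_device and instead derive it on demand from the stored USB interface, while the input and rc devices are reparented to &dev->intf->dev. A hedged illustration of that idiom follows; the function and its arguments are invented, only the USB/input calls are real:

/* Illustrative only: deriving the usb_device from the stored interface. */
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/usb.h>

static int register_button(struct usb_interface *intf, char *path, size_t len)
{
        struct usb_device *udev = interface_to_usbdev(intf);
        struct input_dev *input_dev = input_allocate_device();
        int err;

        if (!input_dev)
                return -ENOMEM;

        usb_make_path(udev, path, len);         /* bus topology string */
        input_dev->name = "sketch button";
        input_dev->phys = path;
        input_dev->id.bustype = BUS_USB;
        input_dev->id.vendor  = le16_to_cpu(udev->descriptor.idVendor);
        input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
        input_dev->dev.parent = &intf->dev;     /* parent is the interface, not the udev */

        err = input_register_device(input_dev);
        if (err)
                input_free_device(input_dev);
        return err;
}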
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 836c6b53b16c..0bac552bbe87 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -21,12 +21,14 @@
02110-1301, USA.
*/
+#include "em28xx.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/init.h>
+#include <linux/usb.h>
-#include "em28xx.h"
#include "em28xx-v4l.h"
/* ------------------------------------------------------------------ */
@@ -63,8 +65,9 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
size = v4l2->vbi_width * v4l2->vbi_height * 2;
if (vb2_plane_size(vb, 0) < size) {
- printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), size);
+ dev_info(&dev->intf->dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, size);
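
The em28xx-vbi.c hunk keeps the usual videobuf2 buffer-preparation contract: reject a buffer whose plane is smaller than the data the driver will write, otherwise declare the payload. A generic sketch of that callback shape follows; the size calculation and names are placeholders, not the em28xx values:

/* Illustrative buf_prepare: sizes and names here are made up. */
#include <linux/errno.h>
#include <media/videobuf2-core.h>

static int my_buf_prepare(struct vb2_buffer *vb)
{
        unsigned long size = 720 * 2 * 18 * 2;  /* width * bpp * lines * fields */

        if (vb2_plane_size(vb, 0) < size)
                return -EINVAL;                 /* userspace buffer too small */

        vb2_set_plane_payload(vb, 0, size);     /* how much we will actually fill */
        return 0;
}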
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1f7fa059eb34..8d93100334ea 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -26,6 +26,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "em28xx.h"
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -37,7 +39,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-#include "em28xx.h"
#include "em28xx-v4l.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -63,18 +64,17 @@ static int alt;
module_param(alt, int, 0644);
MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-#define em28xx_videodbg(fmt, arg...) do {\
- if (video_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
+#define em28xx_videodbg(fmt, arg...) do { \
+ if (video_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "video: %s: " fmt, __func__, ## arg); \
+} while (0)
-#define em28xx_isocdbg(fmt, arg...) \
-do {\
- if (isoc_debug) { \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); \
- } \
- } while (0)
+#define em28xx_isocdbg(fmt, arg...) do { \
+ if (isoc_debug) \
+ dev_printk(KERN_DEBUG, &dev->intf->dev, \
+ "isoc: %s: " fmt, __func__, ## arg); \
+} while (0)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
@@ -360,6 +360,7 @@ static int em28xx_resolution_set(struct em28xx *dev)
static int em28xx_set_alternate(struct em28xx *dev)
{
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int errCode;
int i;
unsigned int min_pkt_size = v4l2->width * 2 + 4;
@@ -411,10 +412,11 @@ set_alt:
}
em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
dev->alt, dev->max_pkt_size);
- errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+ errCode = usb_set_interface(udev, dev->ifnum, dev->alt);
if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
- dev->alt, errCode);
+ dev_err(&dev->intf->dev,
+ "cannot change alternate number to %d (error=%i)\n",
+ dev->alt, errCode);
return errCode;
}
return 0;
@@ -505,8 +507,7 @@ static void em28xx_copy_video(struct em28xx *dev,
if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
buf->length) {
- em28xx_isocdbg("Overflow of %zu bytes past buffer end"
- "(2)\n",
+ em28xx_isocdbg("Overflow of %zu bytes past buffer end(2)\n",
((char *)startwrite + lencopy) -
((char *)buf->vb_buf + buf->length));
lencopy = remain = (char *)buf->vb_buf + buf->length -
@@ -926,10 +927,11 @@ static int em28xx_enable_analog_tuner(struct em28xx *dev)
ret = media_entity_setup_link(link, flags);
if (ret) {
- pr_err("Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
+ dev_err(&dev->intf->dev,
+ "Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
return ret;
} else
em28xx_videodbg("link %s->%s was %s\n",
@@ -957,14 +959,16 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
if (ret < 0)
- pr_err("failed to initialize video media entity!\n");
+ dev_err(&dev->intf->dev,
+ "failed to initialize video media entity!\n");
if (em28xx_vbi_supported(dev)) {
v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
&v4l2->vbi_pad);
if (ret < 0)
- pr_err("failed to initialize vbi media entity!\n");
+ dev_err(&dev->intf->dev,
+ "failed to initialize vbi media entity!\n");
}
/* Webcams don't have input connectors */
@@ -997,11 +1001,13 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
if (ret < 0)
- pr_err("failed to initialize input pad[%d]!\n", i);
+ dev_err(&dev->intf->dev,
+ "failed to initialize input pad[%d]!\n", i);
ret = media_device_register_entity(dev->media_dev, ent);
if (ret < 0)
- pr_err("failed to register input entity %d!\n", i);
+ dev_err(&dev->intf->dev,
+ "failed to register input entity %d!\n", i);
}
#endif
}
@@ -1854,10 +1860,11 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
- usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
+ usb_make_path(udev, cap->bus_info, sizeof(cap->bus_info));
if (vdev->vfl_type == VFL_TYPE_GRABBER)
cap->device_caps = V4L2_CAP_READWRITE |
@@ -2048,8 +2055,9 @@ static int em28xx_v4l2_open(struct file *filp)
ret = v4l2_fh_open(filp);
if (ret) {
- em28xx_errdev("%s: v4l2_fh_open() returned error %d\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: v4l2_fh_open() returned error %d\n",
+ __func__, ret);
mutex_unlock(&dev->lock);
return ret;
}
@@ -2103,7 +2111,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
if (v4l2 == NULL)
return 0;
- em28xx_info("Closing video extension\n");
+ dev_info(&dev->intf->dev, "Closing video extension\n");
mutex_lock(&dev->lock);
@@ -2114,18 +2122,18 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
em28xx_v4l2_media_release(dev);
if (video_is_registered(&v4l2->radio_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->radio_dev));
video_unregister_device(&v4l2->radio_dev);
}
if (video_is_registered(&v4l2->vbi_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vbi_dev));
video_unregister_device(&v4l2->vbi_dev);
}
if (video_is_registered(&v4l2->vdev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vdev));
video_unregister_device(&v4l2->vdev);
}
@@ -2154,7 +2162,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Suspending video extension\n");
+ dev_info(&dev->intf->dev, "Suspending video extension\n");
em28xx_stop_urbs(dev);
return 0;
}
@@ -2167,7 +2175,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
if (!dev->has_video)
return 0;
- em28xx_info("Resuming video extension\n");
+ dev_info(&dev->intf->dev, "Resuming video extension\n");
/* what do we do here */
return 0;
}
@@ -2181,6 +2189,7 @@ static int em28xx_v4l2_close(struct file *filp)
{
struct em28xx *dev = video_drvdata(filp);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct usb_device *udev = interface_to_usbdev(dev->intf);
int errCode;
em28xx_videodbg("users=%d\n", v4l2->users);
@@ -2202,10 +2211,11 @@ static int em28xx_v4l2_close(struct file *filp)
/* set alternate 0 */
dev->alt = 0;
em28xx_videodbg("setting alternate 0\n");
- errCode = usb_set_interface(dev->udev, 0, 0);
+ errCode = usb_set_interface(udev, 0, 0);
if (errCode < 0) {
- em28xx_errdev("cannot change alternate number to "
- "0 (error=%i)\n", errCode);
+ dev_err(&dev->intf->dev,
+ "cannot change alternate number to 0 (error=%i)\n",
+ errCode);
}
}
@@ -2338,7 +2348,7 @@ static void em28xx_vdev_init(struct em28xx *dev,
vfd->tvnorms = 0;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
- dev->name, type_name);
+ dev_name(&dev->intf->dev), type_name);
video_set_drvdata(vfd, dev);
}
@@ -2422,13 +2432,12 @@ static int em28xx_v4l2_init(struct em28xx *dev)
return 0;
}
- em28xx_info("Registering V4L2 extension\n");
+ dev_info(&dev->intf->dev, "Registering V4L2 extension\n");
mutex_lock(&dev->lock);
v4l2 = kzalloc(sizeof(struct em28xx_v4l2), GFP_KERNEL);
- if (v4l2 == NULL) {
- em28xx_info("em28xx_v4l: memory allocation failed\n");
+ if (!v4l2) {
mutex_unlock(&dev->lock);
return -ENOMEM;
}
@@ -2439,9 +2448,10 @@ static int em28xx_v4l2_init(struct em28xx *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
v4l2->v4l2_dev.mdev = dev->media_dev;
#endif
- ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
+ ret = v4l2_device_register(&dev->intf->dev, &v4l2->v4l2_dev);
if (ret < 0) {
- em28xx_errdev("Call to v4l2_device_register() failed!\n");
+ dev_err(&dev->intf->dev,
+ "Call to v4l2_device_register() failed!\n");
goto err;
}
@@ -2525,8 +2535,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Configure audio */
ret = em28xx_audio_setup(dev);
if (ret < 0) {
- em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: Error while setting audio - error [%d]!\n",
+ __func__, ret);
goto unregister_dev;
}
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
@@ -2553,16 +2564,18 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Send a reset to other chips via gpio */
ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
if (ret < 0) {
- em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+ __func__, ret);
goto unregister_dev;
}
msleep(3);
ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
if (ret < 0) {
- em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
- __func__, ret);
+ dev_err(&dev->intf->dev,
+ "%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+ __func__, ret);
goto unregister_dev;
}
msleep(3);
@@ -2663,8 +2676,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
- em28xx_errdev("unable to register video device (error=%i).\n",
- ret);
+ dev_err(&dev->intf->dev,
+ "unable to register video device (error=%i).\n", ret);
goto unregister_dev;
}
@@ -2693,7 +2706,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
- em28xx_errdev("unable to register vbi device\n");
+ dev_err(&dev->intf->dev,
+ "unable to register vbi device\n");
goto unregister_dev;
}
}
@@ -2704,11 +2718,13 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = video_register_device(&v4l2->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
- em28xx_errdev("can't register radio device\n");
+ dev_err(&dev->intf->dev,
+ "can't register radio device\n");
goto unregister_dev;
}
- em28xx_info("Registered radio device as %s\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev,
+ "Registered radio device as %s\n",
+ video_device_node_name(&v4l2->radio_dev));
}
/* Init entities at the Media Controller */
@@ -2717,18 +2733,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_mc_create_media_graph(dev->media_dev);
if (ret) {
- em28xx_errdev("failed to create media graph\n");
+ dev_err(&dev->intf->dev,
+ "failed to create media graph\n");
em28xx_v4l2_media_release(dev);
goto unregister_dev;
}
#endif
- em28xx_info("V4L2 video device registered as %s\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev,
+ "V4L2 video device registered as %s\n",
+ video_device_node_name(&v4l2->vdev));
if (video_is_registered(&v4l2->vbi_dev))
- em28xx_info("V4L2 VBI device registered as %s\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 VBI device registered as %s\n",
+ video_device_node_name(&v4l2->vbi_dev));
/* Save some power by putting tuner to sleep */
v4l2_device_call_all(&v4l2->v4l2_dev, 0, core, s_power, 0);
@@ -2736,7 +2755,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* initialize videobuf2 stuff */
em28xx_vb2_setup(dev);
- em28xx_info("V4L2 extension successfully initialized\n");
+ dev_info(&dev->intf->dev,
+ "V4L2 extension successfully initialized\n");
kref_get(&dev->ref);
@@ -2745,18 +2765,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
unregister_dev:
if (video_is_registered(&v4l2->radio_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->radio_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->radio_dev));
video_unregister_device(&v4l2->radio_dev);
}
if (video_is_registered(&v4l2->vbi_dev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vbi_dev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vbi_dev));
video_unregister_device(&v4l2->vbi_dev);
}
if (video_is_registered(&v4l2->vdev)) {
- em28xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(&v4l2->vdev));
+ dev_info(&dev->intf->dev,
+ "V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vdev));
video_unregister_device(&v4l2->vdev);
}
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index d148463b22c1..ca59e2d4fccf 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -610,7 +610,6 @@ struct em28xx {
struct em28xx_IR *ir;
/* generic device properties */
- char name[30]; /* name (including minor) of the device */
int model; /* index in the device_data struct */
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
@@ -678,7 +677,7 @@ struct em28xx {
spinlock_t slock;
/* usb transfer */
- struct usb_device *udev; /* the usb device */
+ struct usb_interface *intf; /* the usb interface */
u8 ifnum; /* number of the assigned usb interface */
u8 analog_ep_isoc; /* address of isoc endpoint for analog */
u8 analog_ep_bulk; /* address of bulk endpoint for analog */
@@ -797,20 +796,4 @@ void em28xx_free_device(struct kref *ref);
int em28xx_detect_sensor(struct em28xx *dev);
int em28xx_init_camera(struct em28xx *dev);
-/* printk macros */
-
-#define em28xx_err(fmt, arg...) do {\
- printk(KERN_ERR fmt , ##arg); } while (0)
-
-#define em28xx_errdev(fmt, arg...) do {\
- printk(KERN_ERR "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
-#define em28xx_info(fmt, arg...) do {\
- printk(KERN_INFO "%s: "fmt,\
- dev->name , ##arg); } while (0)
-#define em28xx_warn(fmt, arg...) do {\
- printk(KERN_WARNING "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
#endif
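
With the printk wrapper macros and the name[30] field removed from struct em28xx, message prefixes come from the driver core itself. The fragment below is a hypothetical illustration of what that buys (my_dev is not struct em28xx):

#include <linux/device.h>
#include <linux/usb.h>

struct my_dev {                         /* stand-in, not struct em28xx */
        struct usb_interface *intf;
};

static void report(struct my_dev *dev)
{
        /* dev_* prefixes the message with the driver and device name taken
         * from the struct device, e.g. roughly "em28xx 1-1.2:1.0: ...". */
        dev_info(&dev->intf->dev, "board has no eeprom\n");

        /* Where a plain string is still needed (i2c adapter name, V4L2 node
         * name), dev_name() of the interface replaces the old name[] field. */
        pr_info("registered as %s\n", dev_name(&dev->intf->dev));
}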
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 95a3af644a92..af1d02430931 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_GO7007
select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for the WIS GO7007 MPEG
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index af2395a76d8b..fa2cbb981905 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -201,8 +201,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
buffer_len = le16_to_cpu(ep->wMaxPacketSize);
interval = ep->bInterval;
- PDEBUG(D_CONF, "found int in endpoint: 0x%x, "
- "buffer_len=%u, interval=%u",
+ PDEBUG(D_CONF, "found int in endpoint: 0x%x, buffer_len=%u, interval=%u",
ep->bEndpointAddress, buffer_len, interval);
dev = gspca_dev->dev;
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index ac295f04bd18..b12ecb72df4c 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -299,10 +299,7 @@ static int jl2005c_stream_start_cif_small(struct gspca_dev *gspca_dev)
static int jl2005c_stop(struct gspca_dev *gspca_dev)
{
- int retval;
-
- retval = jl2005c_write_reg(gspca_dev, 0x07, 0x00);
- return retval;
+ return jl2005c_write_reg(gspca_dev, 0x07, 0x00);
}
/*
diff --git a/drivers/media/usb/gspca/m5602/m5602_core.c b/drivers/media/usb/gspca/m5602/m5602_core.c
index e4a0658e3f83..f1dcd9021983 100644
--- a/drivers/media/usb/gspca/m5602/m5602_core.c
+++ b/drivers/media/usb/gspca/m5602/m5602_core.c
@@ -154,8 +154,8 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
- PDEBUG(D_CONF, "Reading sensor register "
- "0x%x containing 0x%x ", address, *i2c_data);
+ PDEBUG(D_CONF, "Reading sensor register 0x%x containing 0x%x ",
+ address, *i2c_data);
}
return err;
}
@@ -441,13 +441,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(force_sensor, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(force_sensor,
- "forces detection of a sensor, "
- "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, "
- "4 = MT9M111, 5 = PO1030, 6 = OV7660");
+ "forces detection of a sensor, 1 = OV9650, 2 = S5K83A, 3 = S5K4AA, 4 = MT9M111, 5 = PO1030, 6 = OV7660");
module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
module_param(dump_sensor, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers "
- "at startup providing a sensor is found");
+MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers at startup providing a sensor is found");
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index f006e29ca019..6dfb364094ec 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -72,8 +72,7 @@
#define MR97310A_MIN_CLOCKDIV_MAX 8
#define MR97310A_MIN_CLOCKDIV_DEFAULT 3
-MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,"
- "Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 965372a5ff2f..4dbca54cf2a8 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -4326,8 +4326,7 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
/* Frame end */
if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
(in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
- PERR("Invalid frame size, got: %dx%d,"
- " requested: %dx%d\n",
+ PERR("Invalid frame size, got: %dx%d, requested: %dx%d\n",
(in[9] + 1) * 8, (in[10] + 1) * 8,
gspca_dev->pixfmt.width,
gspca_dev->pixfmt.height);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 07529e5a0c56..51e11248bbb8 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -179,8 +179,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
PDEBUG(D_PROBE,
- "Pixart PAC207BCA Image Processor and Control Chip detected"
- " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+ "Pixart PAC207BCA Image Processor and Control Chip detected (vid/pid 0x%04X:0x%04X)",
+ id->idVendor, id->idProduct);
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 8b08bd0172f4..be07a24c4518 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -105,8 +105,7 @@
#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Thomas Kaiser thomas@kaiser-linux.li");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Thomas Kaiser thomas@kaiser-linux.li");
MODULE_DESCRIPTION("Pixart PAC7302");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 10269dad9d20..e7430b06526a 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -29,8 +29,7 @@
#include <linux/dmi.h>
-MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
- "microdia project <microdia@googlegroups.com>");
+MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, microdia project <microdia@googlegroups.com>");
MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -1948,8 +1947,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
if (intf->num_altsetting != 9) {
- pr_warn("sn9c20x camera with unknown number of alt "
- "settings (%d), please report!\n",
+ pr_warn("sn9c20x camera with unknown number of alt settings (%d), please report!\n",
intf->num_altsetting);
gspca_dev->alt = intf->num_altsetting;
return 0;
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index bcd2c04c770e..ee84863d27d4 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -581,8 +581,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06e1, 0xa190)},
-/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
- {USB_DEVICE(0x0733, 0x0430)}, */
+/* {USB_DEVICE(0x0733, 0x0430)}, FIXME: may be IntelPCCameraPro BRIDGE_SPCA505 */
{USB_DEVICE(0x0734, 0x043b)},
{USB_DEVICE(0x99fa, 0x8988)},
{}
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index a7ae0ec9fa91..9424c33f0ddb 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -41,8 +41,7 @@
#include <linux/slab.h>
#include "gspca.h"
-MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, "
- "Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index aa21edc9502d..6c45dcc44eb0 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -210,8 +210,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
int ret;
PDEBUG(D_PROBE,
- "SQ9050 camera detected"
- " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+ "SQ9050 camera detected (vid/pid 0x%04X:0x%04X)",
+ id->idVendor, id->idProduct);
ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0);
if (ret < 0) {
@@ -257,11 +257,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- int ret;
-
/* connect to the camera and reset it. */
- ret = sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
- return ret;
+ return sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
}
/* Set up for getting frames. */
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 6ac93d8db427..fef7a784b879 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -412,8 +412,7 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
len -= 4;
if (len < chunk_len) {
- PERR("URB packet length is smaller"
- " than the specified chunk length");
+ PERR("URB packet length is smaller than the specified chunk length");
gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
@@ -455,8 +454,7 @@ frame_data:
sd->to_skip = gspca_dev->pixfmt.width * 4;
if (chunk_len)
- PERR("Chunk length is "
- "non-zero on a SOF");
+ PERR("Chunk length is non-zero on a SOF");
break;
case 0x8002:
@@ -469,8 +467,7 @@ frame_data:
NULL, 0);
if (chunk_len)
- PERR("Chunk length is "
- "non-zero on a EOF");
+ PERR("Chunk length is non-zero on a EOF");
break;
case 0x0005:
@@ -582,18 +579,12 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
- /* QuickCam Express */
- {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
- /* LEGO cam / QuickCam Web */
- {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
- /* Dexxa WebCam USB */
- {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
- /* QuickCam Messenger */
- {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
- /* QuickCam Communicate */
- {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
- /* QuickCam Messenger (new) */
- {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, /* QuickCam Express */
+ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 }, /* LEGO cam / QuickCam Web */
+ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 }, /* Dexxa WebCam USB */
+ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger */
+ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, /* QuickCam Communicate */
+ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger (new) */
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
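
The stv06xx table above is only reflowed, one entry per line with the model noted in a trailing comment. For reference, a skeletal device table of the same shape looks like the sketch below; the VID/PID values are placeholders, not real products:

#include <linux/module.h>
#include <linux/usb.h>

/* Placeholder IDs; driver_info selects a per-model bridge type. */
static const struct usb_device_id my_table[] = {
        {USB_DEVICE(0x1234, 0x0001), .driver_info = 0 },        /* model A */
        {USB_DEVICE(0x1234, 0x0002), .driver_info = 1 },        /* model B */
        {}                                                       /* terminator */
};
MODULE_DEVICE_TABLE(usb, my_table);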
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 46c9f2229a18..38dc9e7aa313 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -368,8 +368,7 @@ static void spca504_read_info(struct gspca_dev *gspca_dev)
info[i] = gspca_dev->usb_buf[0];
}
PDEBUG(D_STREAM,
- "Read info: %d %d %d %d %d %d."
- " Should be 1,0,2,2,0,0",
+ "Read info: %d %d %d %d %d %d. Should be 1,0,2,2,0,0",
info[0], info[1], info[2],
info[3], info[4], info[5]);
}
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 15eb069ab60b..983fc6b500af 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -24,8 +24,7 @@
#include "gspca.h"
MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver");
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Anders Blomdell <anders.blomdell@control.lth.se>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Anders Blomdell <anders.blomdell@control.lth.se>");
MODULE_LICENSE("GPL");
static int force_sensor = -1;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 5f7254d2bc9a..d5d8c7e81762 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -25,8 +25,7 @@
#include "gspca.h"
#include "jpeg.h"
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
- "Serge A. Suchkov <Serge.A.S@tochka.ru>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Serge A. Suchkov <Serge.A.S@tochka.ru>");
MODULE_DESCRIPTION("GSPCA ZC03xx/VC3xx USB Camera Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index a61d8fd63c12..15f016ad5b89 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -41,13 +41,11 @@ MODULE_PARM_DESC(hdpvr_debug, "enable debugging output");
static uint default_video_input = HDPVR_VIDEO_INPUTS;
module_param(default_video_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / "
- "1=S-Video / 2=Composite");
+MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / 1=S-Video / 2=Composite");
static uint default_audio_input = HDPVR_AUDIO_INPUTS;
module_param(default_audio_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / "
- "1=RCA front / 2=S/PDIF");
+MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / 1=RCA front / 2=S/PDIF");
static bool boost_audio;
module_param(boost_audio, bool, S_IRUGO|S_IWUSR);
@@ -165,8 +163,7 @@ static int device_authorization(struct hdpvr_device *dev)
dev->flags |= HDPVR_FLAG_AC3_CAP;
break;
default:
- v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might"
- " not work.\n");
+ v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might not work.\n");
if (dev->fw_ver >= HDPVR_FIRMWARE_VERSION_AC3)
dev->flags |= HDPVR_FLAG_AC3_CAP;
else
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 9b641c4d4431..fcab55038d99 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -145,15 +145,14 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
msgs[0].len);
} else if (num == 2) {
if (msgs[0].addr != msgs[1].addr) {
- v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer "
- "with conflicting target addresses\n");
+ v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer with conflicting target addresses\n");
retval = -EINVAL;
goto out;
}
if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD)) {
- v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with "
- "r0=%d, r1=%d\n", msgs[0].flags & I2C_M_RD,
+ v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with r0=%d, r1=%d\n",
+ msgs[0].flags & I2C_M_RD,
msgs[1].flags & I2C_M_RD);
retval = -EINVAL;
goto out;
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 474c11e1d495..7fb036d6a86e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -336,9 +336,7 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
buf = kmalloc(dev->bulk_in_size, GFP_KERNEL);
if (!buf)
- v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer "
- "for emptying the internal device buffer. "
- "Next capture start will be slow\n");
+ v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer for emptying the internal device buffer. Next capture start will be slow\n");
dev->status = STATUS_SHUTTING_DOWN;
hdpvr_config_call(dev, CTRL_STOP_STREAMING_VALUE, 0x00);
@@ -451,6 +449,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
if (buf->status != BUFSTAT_READY &&
dev->status != STATUS_DISCONNECTED) {
+ int err;
/* return nonblocking */
if (file->f_flags & O_NONBLOCK) {
if (!ret)
@@ -458,9 +457,24 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
goto err;
}
- if (wait_event_interruptible(dev->wait_data,
- buf->status == BUFSTAT_READY))
- return -ERESTARTSYS;
+ err = wait_event_interruptible_timeout(dev->wait_data,
+ buf->status == BUFSTAT_READY,
+ msecs_to_jiffies(1000));
+ if (err < 0) {
+ ret = err;
+ goto err;
+ }
+ if (!err) {
+ v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
+ "timeout: restart streaming\n");
+ hdpvr_stop_streaming(dev);
+ msecs_to_jiffies(4000);
+ err = hdpvr_start_streaming(dev);
+ if (err) {
+ ret = err;
+ goto err;
+ }
+ }
}
if (buf->status != BUFSTAT_READY)
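
The hdpvr read path above swaps an unbounded wait_event_interruptible() for the _timeout variant (1000 ms, as in the hunk) so a stalled device can be kicked by restarting streaming. The sketch below only illustrates the return-value convention of wait_event_interruptible_timeout(): negative if interrupted by a signal, zero if the timeout elapsed with the condition still false, positive otherwise. Everything except the wait API itself is invented:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical waiter; 'wq' and 'ready' stand in for driver state. */
static int wait_for_buffer(wait_queue_head_t *wq, bool *ready)
{
        long ret = wait_event_interruptible_timeout(*wq, *ready,
                                                    msecs_to_jiffies(1000));

        if (ret < 0)
                return ret;             /* -ERESTARTSYS: hit by a signal */
        if (ret == 0)
                return -ETIMEDOUT;      /* condition still false after 1 s */
        return 0;                       /* condition true; ret = jiffies left */
}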
diff --git a/drivers/staging/media/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index c6aa2d1c9df0..6ffc407de62f 100644
--- a/drivers/staging/media/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,6 @@
config USB_PULSE8_CEC
tristate "Pulse Eight HDMI CEC"
- depends on USB_ACM && MEDIA_CEC
+ depends on USB_ACM && MEDIA_CEC_SUPPORT
select SERIO
select SERIO_SERPORT
---help---
diff --git a/drivers/staging/media/pulse8-cec/Makefile b/drivers/media/usb/pulse8-cec/Makefile
index 9800690bc25a..9800690bc25a 100644
--- a/drivers/staging/media/pulse8-cec/Makefile
+++ b/drivers/media/usb/pulse8-cec/Makefile
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 1732c3857b8e..7c18daeb0ade 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -375,27 +375,35 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
switch (log_addrs->primary_device_type[0]) {
case CEC_OP_PRIM_DEVTYPE_TV:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
break;
case CEC_OP_PRIM_DEVTYPE_RECORD:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_RECORD;
break;
case CEC_OP_PRIM_DEVTYPE_TUNER:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TUNER;
break;
case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
break;
case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM;
break;
case CEC_OP_PRIM_DEVTYPE_SWITCH:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
default:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
log_addrs->primary_device_type[0]);
break;
@@ -651,7 +659,7 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->serio = serio;
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
- "HDMI CEC", caps, 1, &serio->dev);
+ "HDMI CEC", caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
if (err < 0)
goto free_device;
@@ -671,7 +679,7 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
if (err)
goto close_serio;
- err = cec_register_adapter(pulse8->adap);
+ err = cec_register_adapter(pulse8->adap, &serio->dev);
if (err < 0)
goto close_serio;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
index 5f953d837bf1..3bac50a248d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
@@ -74,9 +74,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
input = sp->def[hdw->input_val];
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev msp3400 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev msp3400 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
index f82f0f0f2c04..7f29a0464f36 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
@@ -72,9 +72,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev v4l2 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index 7d675fae1846..30eef97ef2ef 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -137,9 +137,7 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev cx2584x set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev cx2584x set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
index e4022bcb155b..58ec706ebdb3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
@@ -176,9 +176,7 @@ int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
pvr2_stream_get_stats(sp, &stats, 0);
ccnt = scnprintf(
buf,acnt,
- "Bytes streamed=%u"
- " URBs: queued=%u idle=%u ready=%u"
- " processed=%u failed=%u\n",
+ "Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u\n",
stats.bytes_processed,
stats.buffers_in_queue,
stats.buffers_in_idle,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd0c3b7..276b17fb9aad 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -56,8 +56,7 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to allocate memory"
- " required to read eeprom");
+ "Failed to allocate memory required to read eeprom");
return NULL;
}
@@ -74,8 +73,8 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
strange but it's what they do) */
mode16 = (addr & 1);
eepromSize = (mode16 ? 4096 : 256);
- trace_eeprom("Examining %d byte eeprom at location 0x%x"
- " using %d bit addressing",eepromSize,addr,
+ trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+ eepromSize, addr,
mode16 ? 16 : 8);
msg[0].addr = addr;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index 593b3e9b6bfd..f0483621d2a3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -188,9 +188,7 @@ static int pvr2_encoder_cmd(void *ctxt,
if (arg_cnt_send > (ARRAY_SIZE(wrData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many input arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many input arguments (was given %u limit %lu)",
arg_cnt_send, (long unsigned) ARRAY_SIZE(wrData) - 4);
return -EINVAL;
}
@@ -198,9 +196,7 @@ static int pvr2_encoder_cmd(void *ctxt,
if (arg_cnt_recv > (ARRAY_SIZE(rdData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many return arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many return arguments (was given %u limit %lu)",
arg_cnt_recv, (long unsigned) ARRAY_SIZE(rdData) - 4);
return -EINVAL;
}
@@ -248,14 +244,12 @@ static int pvr2_encoder_cmd(void *ctxt,
retry_flag = !0;
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Encoder timed out waiting for us"
- "; arranging to retry");
+ "Encoder timed out waiting for us; arranging to retry");
} else {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** device's encoder"
- " appears to be stuck"
- " (status=0x%08x)",rdData[0]);
+ "***WARNING*** device's encoder appears to be stuck (status=0x%08x)",
+rdData[0]);
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
@@ -293,11 +287,7 @@ static int pvr2_encoder_cmd(void *ctxt,
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Giving up on command."
- " This is normally recovered via a firmware"
- " reload and re-initialization; concern"
- " is only warranted if this happens repeatedly"
- " and rapidly.");
+ "Giving up on command. This is normally recovered via a firmware reload and re-initialization; concern is only warranted if this happens repeatedly and rapidly.");
break;
}
wrData[0] = 0x7;
@@ -325,9 +315,7 @@ static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
if (args > ARRAY_SIZE(data)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Failed to write cx23416 command"
- " - too many arguments"
- " (was given %u limit %lu)",
+ "Failed to write cx23416 command - too many arguments (was given %u limit %lu)",
args, (long unsigned) ARRAY_SIZE(data));
return -EINVAL;
}
@@ -433,8 +421,7 @@ int pvr2_encoder_configure(struct pvr2_hdw *hdw)
{
int ret;
int val;
- pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
- " (cx2341x module)");
+ pvr2_trace(PVR2_TRACE_ENCODER, "pvr2_encoder_configure (cx2341x module)");
hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
hdw->enc_ctl_state.width = hdw->res_hor_val;
hdw->enc_ctl_state.height = hdw->res_ver_val;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1eb4f7ba2967..e3ed8ffee9f7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1371,8 +1371,7 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
fwnames[idx],
&hdw->usb_dev->dev);
if (!ret) {
- trace_firmware("Located %s firmware: %s;"
- " uploading...",
+ trace_firmware("Located %s firmware: %s; uploading...",
fwtypename,
fwnames[idx]);
return idx;
@@ -1383,21 +1382,17 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
return ret;
}
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "***WARNING***"
- " Device %s firmware"
- " seems to be missing.",
+ "***WARNING*** Device %s firmware seems to be missing.",
fwtypename);
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Did you install the pvrusb2 firmware files"
- " in their proper location?");
+ "Did you install the pvrusb2 firmware files in their proper location?");
if (fwcount == 1) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"request_firmware unable to locate %s file %s",
fwtypename,fwnames[0]);
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "request_firmware unable to locate"
- " one of the following %s files:",
+ "request_firmware unable to locate one of the following %s files:",
fwtypename);
for (idx = 0; idx < fwcount; idx++) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -1431,8 +1426,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
if (!hdw->hdw_desc->fx2_firmware.cnt) {
hdw->fw1_state = FW1_STATE_OK;
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Connected device type defines"
- " no firmware to upload; ignoring firmware");
+ "Connected device type defines no firmware to upload; ignoring firmware");
return -ENOTTY;
}
@@ -1457,13 +1451,11 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
(!(hdw->hdw_desc->flag_fx2_16kb && (fwsize == 0x4000)))) {
if (hdw->hdw_desc->flag_fx2_16kb) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Wrong fx2 firmware size"
- " (expected 8192 or 16384, got %u)",
+ "Wrong fx2 firmware size (expected 8192 or 16384, got %u)",
fwsize);
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Wrong fx2 firmware size"
- " (expected 8192, got %u)",
+ "Wrong fx2 firmware size (expected 8192, got %u)",
fwsize);
}
release_firmware(fw_entry);
@@ -1585,8 +1577,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
if (fw_len % sizeof(u32)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "size of %s firmware"
- " must be a multiple of %zu bytes",
+ "size of %s firmware must be a multiple of %zu bytes",
fw_files[fwidx],sizeof(u32));
release_firmware(fw_entry);
ret = -EINVAL;
@@ -1887,8 +1878,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
pvr2_trace(PVR2_TRACE_STD,
- "Supported video standard(s) reported available"
- " in hardware: %.*s",
+ "Supported video standard(s) reported available in hardware: %.*s",
bcnt,buf);
hdw->std_mask_avail = hdw->std_mask_eeprom;
@@ -1897,8 +1887,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
if (std2) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
pvr2_trace(PVR2_TRACE_STD,
- "Expanding supported video standards"
- " to include: %.*s",
+ "Expanding supported video standards to include: %.*s",
bcnt,buf);
hdw->std_mask_avail |= std2;
}
@@ -1917,8 +1906,8 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
if (std3) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std3);
pvr2_trace(PVR2_TRACE_STD,
- "Initial video standard"
- " (determined by device type): %.*s",bcnt,buf);
+ "Initial video standard (determined by device type): %.*s",
+ bcnt, buf);
hdw->std_mask_cur = std3;
hdw->std_dirty = !0;
return;
@@ -1980,8 +1969,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
}
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Executing cx25840 VBI hack",
+ "Module ID %u: Executing cx25840 VBI hack",
hdw->decoder_client_id);
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
@@ -2007,8 +1995,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
fname = (mid < ARRAY_SIZE(module_names)) ? module_names[mid] : NULL;
if (!fname) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u for device %s has no name?"
- " The driver might have a configuration problem.",
+ "Module ID %u for device %s has no name? The driver might have a configuration problem.",
mid,
hdw->hdw_desc->description);
return -EINVAL;
@@ -2027,32 +2014,27 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
ARRAY_SIZE(i2caddr));
if (i2ccnt) {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Using default i2c address list",
+ "Module ID %u: Using default i2c address list",
mid);
}
}
if (!i2ccnt) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u (%s) for device %s:"
- " No i2c addresses."
- " The driver might have a configuration problem.",
+ "Module ID %u (%s) for device %s: No i2c addresses. The driver might have a configuration problem.",
mid, fname, hdw->hdw_desc->description);
return -EINVAL;
}
if (i2ccnt == 1) {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Setting up with specified i2c address 0x%x",
+ "Module ID %u: Setting up with specified i2c address 0x%x",
mid, i2caddr[0]);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
fname, i2caddr[0], NULL);
} else {
pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Setting up with address probe list",
+ "Module ID %u: Setting up with address probe list",
mid);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
fname, 0, i2caddr);
@@ -2060,9 +2042,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
if (!sd) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Module ID %u (%s) for device %s failed to load."
- " Possible missing sub-device kernel module or"
- " initialization failure within module.",
+ "Module ID %u (%s) for device %s failed to load. Possible missing sub-device kernel module or initialization failure within module.",
mid, fname, hdw->hdw_desc->description);
return -EIO;
}
@@ -2124,18 +2104,14 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
== 0);
if (reloadFl) {
pvr2_trace(PVR2_TRACE_INIT,
- "USB endpoint config looks strange"
- "; possibly firmware needs to be"
- " loaded");
+ "USB endpoint config looks strange; possibly firmware needs to be loaded");
}
}
if (!reloadFl) {
reloadFl = !pvr2_hdw_check_firmware(hdw);
if (reloadFl) {
pvr2_trace(PVR2_TRACE_INIT,
- "Check for FX2 firmware failed"
- "; possibly firmware needs to be"
- " loaded");
+ "Check for FX2 firmware failed; possibly firmware needs to be loaded");
}
}
if (reloadFl) {
@@ -2200,8 +2176,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
if (!pvr2_hdw_dev_ok(hdw)) return;
if (ret < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Unable to determine location of eeprom,"
- " skipping");
+ "Unable to determine location of eeprom, skipping");
} else {
hdw->eeprom_addr = ret;
pvr2_eeprom_analyze(hdw);
@@ -2254,8 +2229,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
idx = get_default_error_tolerance(hdw);
if (idx) {
pvr2_trace(PVR2_TRACE_INIT,
- "pvr2_hdw_setup: video stream %p"
- " setting tolerance %u",
+ "pvr2_hdw_setup: video stream %p setting tolerance %u",
hdw->vid_stream,idx);
}
pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
@@ -2285,16 +2259,13 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
if (hdw->flag_init_ok) {
pvr2_trace(
PVR2_TRACE_INFO,
- "Device initialization"
- " completed successfully.");
+ "Device initialization completed successfully.");
break;
}
if (hdw->fw1_state == FW1_STATE_RELOAD) {
pvr2_trace(
PVR2_TRACE_INFO,
- "Device microcontroller firmware"
- " (re)loaded; it should now reset"
- " and reconnect.");
+ "Device microcontroller firmware (re)loaded; it should now reset and reconnect.");
break;
}
pvr2_trace(
@@ -2303,48 +2274,35 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
if (hdw->fw1_state == FW1_STATE_MISSING) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Giving up since device"
- " microcontroller firmware"
- " appears to be missing.");
+ "Giving up since device microcontroller firmware appears to be missing.");
break;
}
}
if (hdw->flag_modulefail) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** pvrusb2 driver initialization"
- " failed due to the failure of one or more"
- " sub-device kernel modules.");
+ "***WARNING*** pvrusb2 driver initialization failed due to the failure of one or more sub-device kernel modules.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "You need to resolve the failing condition"
- " before this driver can function. There"
- " should be some earlier messages giving more"
- " information about the problem.");
+ "You need to resolve the failing condition before this driver can function. There should be some earlier messages giving more information about the problem.");
break;
}
if (procreload) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempting pvrusb2 recovery by reloading"
- " primary firmware.");
+ "Attempting pvrusb2 recovery by reloading primary firmware.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "If this works, device should disconnect"
- " and reconnect in a sane state.");
+ "If this works, device should disconnect and reconnect in a sane state.");
hdw->fw1_state = FW1_STATE_UNKNOWN;
pvr2_upload_firmware1(hdw);
} else {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** pvrusb2 device hardware"
- " appears to be jammed"
- " and I can't clear it.");
+ "***WARNING*** pvrusb2 device hardware appears to be jammed and I can't clear it.");
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "You might need to power cycle"
- " the pvrusb2 device"
- " in order to recover.");
+ "You might need to power cycle the pvrusb2 device in order to recover.");
}
} while (0);
pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
@@ -2396,12 +2354,8 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
hdw_desc = (const struct pvr2_device_desc *)(devid->driver_info);
if (hdw_desc == NULL) {
- pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create:"
- " No device description pointer,"
- " unable to continue.");
- pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type,"
- " please contact Mike Isely <isely@pobox.com>"
- " to get it included in the driver\n");
+ pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create: No device description pointer, unable to continue.");
+ pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type, please contact Mike Isely <isely@pobox.com> to get it included in the driver\n");
goto fail;
}
@@ -2413,14 +2367,12 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (hdw_desc->flag_is_experimental) {
pvr2_trace(PVR2_TRACE_INFO, "**********");
pvr2_trace(PVR2_TRACE_INFO,
- "WARNING: Support for this device (%s) is"
- " experimental.", hdw_desc->description);
+ "WARNING: Support for this device (%s) is experimental.",
+ hdw_desc->description);
pvr2_trace(PVR2_TRACE_INFO,
- "Important functionality might not be"
- " entirely working.");
+ "Important functionality might not be entirely working.");
pvr2_trace(PVR2_TRACE_INFO,
- "Please consider contacting the driver author to"
- " help with further stabilization of the driver.");
+ "Please consider contacting the driver author to help with further stabilization of the driver.");
pvr2_trace(PVR2_TRACE_INFO, "**********");
}
if (!hdw) goto fail;
@@ -3375,8 +3327,7 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to allocate memory"
- " required to read eeprom");
+ "Failed to allocate memory required to read eeprom");
return NULL;
}
@@ -3393,8 +3344,8 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
strange but it's what they do) */
mode16 = (addr & 1);
eepromSize = (mode16 ? EEPROM_SIZE : 256);
- trace_eeprom("Examining %d byte eeprom at location 0x%x"
- " using %d bit addressing",eepromSize,addr,
+ trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+ eepromSize, addr,
mode16 ? 16 : 8);
msg[0].addr = addr;
@@ -3461,8 +3412,8 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
if (hdw->fw_cpu_flag) {
hdw->fw_size = (mode == 1) ? 0x4000 : 0x2000;
pvr2_trace(PVR2_TRACE_FIRMWARE,
- "Preparing to suck out CPU firmware"
- " (size=%u)", hdw->fw_size);
+ "Preparing to suck out CPU firmware (size=%u)",
+ hdw->fw_size);
hdw->fw_buffer = kzalloc(hdw->fw_size,GFP_KERNEL);
if (!hdw->fw_buffer) {
hdw->fw_size = 0;
@@ -3620,21 +3571,18 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
struct timer_list timer;
if (!hdw->ctl_lock_held) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " without lock!!");
+ "Attempted to execute control transfer without lock!!");
return -EDEADLK;
}
if (!hdw->flag_ok && !probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " when device not ok");
+ "Attempted to execute control transfer when device not ok");
return -EIO;
}
if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute control transfer"
- " when USB is disconnected");
+ "Attempted to execute control transfer when USB is disconnected");
}
return -ENOTTY;
}
@@ -3645,16 +3593,14 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
if (write_len > PVR2_CTL_BUFFSIZE) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute %d byte"
- " control-write transfer (limit=%d)",
+ "Attempted to execute %d byte control-write transfer (limit=%d)",
write_len,PVR2_CTL_BUFFSIZE);
return -EINVAL;
}
if (read_len > PVR2_CTL_BUFFSIZE) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "Attempted to execute %d byte"
- " control-read transfer (limit=%d)",
+ "Attempted to execute %d byte control-read transfer (limit=%d)",
write_len,PVR2_CTL_BUFFSIZE);
return -EINVAL;
}
@@ -3703,8 +3649,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
if (status < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to submit write-control"
- " URB status=%d",status);
+ "Failed to submit write-control URB status=%d",
+status);
hdw->ctl_write_pend_flag = 0;
goto done;
}
@@ -3727,8 +3673,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
if (status < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Failed to submit read-control"
- " URB status=%d",status);
+ "Failed to submit read-control URB status=%d",
+status);
hdw->ctl_read_pend_flag = 0;
goto done;
}
@@ -3770,8 +3716,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = hdw->ctl_write_urb->status;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-write URB failure,"
- " status=%d",
+ "control-write URB failure, status=%d",
status);
}
goto done;
@@ -3781,8 +3726,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = -EIO;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-write URB short,"
- " expected=%d got=%d",
+ "control-write URB short, expected=%d got=%d",
write_len,
hdw->ctl_write_urb->actual_length);
}
@@ -3800,8 +3744,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = hdw->ctl_read_urb->status;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-read URB failure,"
- " status=%d",
+ "control-read URB failure, status=%d",
status);
}
goto done;
@@ -3811,8 +3754,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
status = -EIO;
if (!probe_fl) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "control-read URB short,"
- " expected=%d got=%d",
+ "control-read URB short, expected=%d got=%d",
read_len,
hdw->ctl_read_urb->actual_length);
}
@@ -4799,9 +4741,7 @@ static unsigned int pvr2_hdw_report_unlocked(struct pvr2_hdw *hdw,int which,
0);
return scnprintf(
buf,acnt,
- "Bytes streamed=%u"
- " URBs: queued=%u idle=%u ready=%u"
- " processed=%u failed=%u",
+ "Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u",
stats.bytes_processed,
stats.buffers_in_queue,
stats.buffers_in_idle,
@@ -5013,8 +4953,7 @@ int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
if (ret) return ret;
nval = (cval & ~msk) | (val & msk);
pvr2_trace(PVR2_TRACE_GPIO,
- "GPIO direction changing 0x%x:0x%x"
- " from 0x%x to 0x%x",
+ "GPIO direction changing 0x%x:0x%x from 0x%x to 0x%x",
msk,val,cval,nval);
} else {
nval = val;
@@ -5057,9 +4996,7 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
now. (Of course, no sub-drivers seem to implement it either.
But now it's a chicken and egg problem...) */
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, g_tuner, vtp);
- pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll"
- " type=%u strength=%u audio=0x%x cap=0x%x"
- " low=%u hi=%u",
+ pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll type=%u strength=%u audio=0x%x cap=0x%x low=%u hi=%u",
vtp->type,
vtp->signal, vtp->rxsubchans, vtp->capability,
vtp->rangelow, vtp->rangehigh);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 6da5fb544817..cc63e5f4c26c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -62,8 +62,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
if (!data) length = 0;
if (length > (sizeof(hdw->cmd_buffer) - 3)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C write to %u that is too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C write to %u that is too large (desired=%u limit=%u)",
i2c_addr,
length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
return -ENOTSUPP;
@@ -90,8 +89,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
if (hdw->cmd_buffer[0] != 8) {
ret = -EIO;
if (hdw->cmd_buffer[0] != 7) {
- trace_i2c("unexpected status"
- " from i2_write[%d]: %d",
+ trace_i2c("unexpected status from i2_write[%d]: %d",
i2c_addr,hdw->cmd_buffer[0]);
}
}
@@ -116,16 +114,14 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
if (!data) dlen = 0;
if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C read to %u that has wlen too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C read to %u that has wlen too large (desired=%u limit=%u)",
i2c_addr,
dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
return -ENOTSUPP;
}
if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Killing an I2C read to %u that has rlen too large"
- " (desired=%u limit=%u)",
+ "Killing an I2C read to %u that has rlen too large (desired=%u limit=%u)",
i2c_addr,
rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
return -ENOTSUPP;
@@ -154,8 +150,7 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
if (hdw->cmd_buffer[0] != 8) {
ret = -EIO;
if (hdw->cmd_buffer[0] != 7) {
- trace_i2c("unexpected status"
- " from i2_read[%d]: %d",
+ trace_i2c("unexpected status from i2_read[%d]: %d",
i2c_addr,hdw->cmd_buffer[0]);
}
}
@@ -352,13 +347,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Detected a wedged cx25840 chip;"
- " the device will not work.");
+ "WARNING: Detected a wedged cx25840 chip; the device will not work.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"WARNING: Try power cycling the pvrusb2 device.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Disabling further access to the device"
- " to prevent other foul-ups.");
+ "WARNING: Disabling further access to the device to prevent other foul-ups.");
// This blocks all further communication with the part.
hdw->i2c_func[0x44] = NULL;
pvr2_hdw_render_useless(hdw);
@@ -444,8 +437,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
}
} else if (num == 2) {
if (msgs[0].addr != msgs[1].addr) {
- trace_i2c("i2c refusing 2 phase transfer with"
- " conflicting target addresses");
+ trace_i2c("i2c refusing 2 phase transfer with conflicting target addresses");
ret = -ENOTSUPP;
goto done;
}
@@ -477,8 +469,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = 2;
goto done;
} else {
- trace_i2c("i2c refusing complex transfer"
- " read0=%d read1=%d",
+ trace_i2c("i2c refusing complex transfer read0=%d read1=%d",
(msgs[0].flags & I2C_M_RD),
(msgs[1].flags & I2C_M_RD));
}
@@ -492,8 +483,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
for (idx = 0; idx < num; idx++) {
cnt = msgs[idx].len;
printk(KERN_INFO
- "pvrusb2 i2c xfer %u/%u:"
- " addr=0x%x len=%d %s",
+ "pvrusb2 i2c xfer %u/%u: addr=0x%x len=%d %s",
idx+1,num,
msgs[idx].addr,
cnt,
@@ -501,18 +491,18 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
"read" : "write"));
if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
if (cnt > 8) cnt = 8;
- printk(" [");
+ printk(KERN_CONT " [");
for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
- if (offs) printk(" ");
- printk("%02x",msgs[idx].buf[offs]);
+ if (offs) printk(KERN_CONT " ");
+ printk(KERN_CONT "%02x",msgs[idx].buf[offs]);
}
- if (offs < cnt) printk(" ...");
- printk("]");
+ if (offs < cnt) printk(KERN_CONT " ...");
+ printk(KERN_CONT "]");
}
if (idx+1 == num) {
- printk(" result=%d",ret);
+ printk(KERN_CONT " result=%d",ret);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
if (!num) {
printk(KERN_INFO
@@ -668,8 +658,7 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
the emulated IR receiver. */
if (do_i2c_probe(hdw, 0x71)) {
pvr2_trace(PVR2_TRACE_INFO,
- "Device has newer IR hardware;"
- " disabling unneeded virtual IR device");
+ "Device has newer IR hardware; disabling unneeded virtual IR device");
hdw->i2c_func[0x18] = NULL;
/* Remember that this is a different device... */
hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
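
The i2c transfer dump in this file now tags continuation fragments with KERN_CONT: newer printk()s start a new message for every call that lacks an explicit continuation marker, so fragments meant to extend the current line must opt in. A minimal sketch of the pattern (values are illustrative):

	#include <linux/printk.h>

	printk(KERN_INFO "pvrusb2 i2c xfer 1/1: addr=0x44 len=2 write");
	printk(KERN_CONT " [12 34]");
	printk(KERN_CONT " result=2\n");	/* emitted as one log line */
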
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index e68ce24f27e3..e3103ecd4828 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -113,8 +113,7 @@ static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
{
pvr2_trace(PVR2_TRACE_INFO,
- "buffer%s%s %p state=%s id=%d status=%d"
- " stream=%p purb=%p sig=0x%x",
+ "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
(msg ? " " : ""),
(msg ? msg : ""),
bp,
@@ -156,8 +155,7 @@ static void pvr2_buffer_remove(struct pvr2_buffer *bp)
(*cnt)--;
(*bcnt) -= ccnt;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s dec cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s dec cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
bp->state = pvr2_buffer_state_none;
}
@@ -198,8 +196,7 @@ static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
(sp->r_count)++;
sp->r_bcount += bp->used_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->r_bcount,sp->r_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -224,8 +221,7 @@ static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
(sp->i_count)++;
sp->i_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->i_bcount,sp->i_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -249,8 +245,7 @@ static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
(sp->q_count)++;
sp->q_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/"
- " bufferPool %8s inc cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s inc cap=%07d cnt=%02d",
pvr2_buffer_state_decode(bp->state),
sp->q_bcount,sp->q_count);
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -293,8 +288,8 @@ static void pvr2_buffer_done(struct pvr2_buffer *bp)
bp->signature = 0;
bp->stream = NULL;
usb_free_urb(bp->purb);
- pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
- " bufferDone %p",bp);
+ pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone %p",
+ bp);
}
static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
@@ -306,8 +301,7 @@ static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
if (cnt == sp->buffer_total_count) return 0;
pvr2_trace(PVR2_TRACE_BUF_POOL,
- "/*---TRACE_FLOW---*/ poolResize "
- " stream=%p cur=%d adj=%+d",
+ "/*---TRACE_FLOW---*/ poolResize stream=%p cur=%d adj=%+d",
sp,
sp->buffer_total_count,
cnt-sp->buffer_total_count);
@@ -374,8 +368,7 @@ static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
if (sp->buffer_total_count == sp->buffer_target_count) return 0;
pvr2_trace(PVR2_TRACE_BUF_POOL,
- "/*---TRACE_FLOW---*/"
- " poolCheck stream=%p cur=%d tgt=%d",
+ "/*---TRACE_FLOW---*/ poolCheck stream=%p cur=%d tgt=%d",
sp,sp->buffer_total_count,sp->buffer_target_count);
if (sp->buffer_total_count < sp->buffer_target_count) {
@@ -454,8 +447,8 @@ static void buffer_complete(struct urb *urb)
bp->used_count = urb->actual_length;
if (sp->fail_count) {
pvr2_trace(PVR2_TRACE_TOLERANCE,
- "stream %p transfer ok"
- " - fail count reset",sp);
+ "stream %p transfer ok - fail count reset",
+ sp);
sp->fail_count = 0;
}
} else if (sp->fail_count < sp->fail_tolerance) {
@@ -464,8 +457,7 @@ static void buffer_complete(struct urb *urb)
(sp->fail_count)++;
(sp->buffers_failed)++;
pvr2_trace(PVR2_TRACE_TOLERANCE,
- "stream %p ignoring error %d"
- " - fail count increased to %u",
+ "stream %p ignoring error %d - fail count increased to %u",
sp,urb->status,sp->fail_count);
} else {
(sp->buffers_failed)++;
@@ -666,8 +658,7 @@ int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
bp->max_count = cnt;
bp->stream->i_bcount += bp->max_count;
pvr2_trace(PVR2_TRACE_BUF_FLOW,
- "/*---TRACE_FLOW---*/ bufferPool "
- " %8s cap cap=%07d cnt=%02d",
+ "/*---TRACE_FLOW---*/ bufferPool %8s cap cap=%07d cnt=%02d",
pvr2_buffer_state_decode(
pvr2_buffer_state_idle),
bp->stream->i_bcount,bp->stream->i_count);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
index 614d55767a4e..3c7ca2c2c108 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
@@ -25,7 +25,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define BUFFER_COUNT 32
#define BUFFER_SIZE PAGE_ALIGN(0x4000)
@@ -169,9 +169,7 @@ static int pvr2_ioread_start(struct pvr2_ioread *cp)
stat = pvr2_buffer_queue(bp);
if (stat < 0) {
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_start id=%p"
- " error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_start id=%p error=%d",
cp,stat);
pvr2_ioread_stop(cp);
return stat;
@@ -209,8 +207,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
do {
if (cp->stream) {
pvr2_trace(PVR2_TRACE_START_STOP,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_setup (tear-down) id=%p",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_setup (tear-down) id=%p",
+ cp);
pvr2_ioread_stop(cp);
pvr2_stream_kill(cp->stream);
if (pvr2_stream_get_buffer_count(cp->stream)) {
@@ -220,8 +218,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
}
if (sp) {
pvr2_trace(PVR2_TRACE_START_STOP,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_setup (setup) id=%p",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_setup (setup) id=%p",
+ cp);
pvr2_stream_kill(sp);
ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
if (ret < 0) {
@@ -270,9 +268,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_read id=%p"
- " queue_error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p queue_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
return 0;
@@ -292,9 +288,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
if (stat < 0) {
// Streaming error...
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " pvr2_ioread_read id=%p"
- " buffer_error=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p buffer_error=%d",
cp,stat);
pvr2_ioread_stop(cp);
// Give up.
@@ -347,8 +341,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
if (cp->sync_buf_offs >= cp->sync_key_len) {
cp->sync_trashed_count -= cp->sync_key_len;
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " sync_state <== 2 (skipped %u bytes)",
+ "/*---TRACE_READ---*/ sync_state <== 2 (skipped %u bytes)",
cp->sync_trashed_count);
cp->sync_state = 2;
cp->sync_buf_offs = 0;
@@ -358,8 +351,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
if (cp->c_data_offs < cp->c_data_len) {
// Sanity check - should NEVER get here
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "ERROR: pvr2_ioread filter sync problem"
- " len=%u offs=%u",
+ "ERROR: pvr2_ioread filter sync problem len=%u offs=%u",
cp->c_data_len,cp->c_data_offs);
// Get out so we don't get stuck in an infinite
// loop.
@@ -418,8 +410,8 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
if (!cnt) {
pvr2_trace(PVR2_TRACE_TRAP,
- "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
- " ZERO Request? Returning zero.",cp);
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p ZERO Request? Returning zero.",
+cp);
return 0;
}
@@ -477,8 +469,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
// Consumed entire key; switch mode
// to normal.
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/"
- " sync_state <== 0");
+ "/*---TRACE_READ---*/ sync_state <== 0");
cp->sync_state = 0;
}
} else {
@@ -502,8 +493,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
}
pvr2_trace(PVR2_TRACE_DATA_FLOW,
- "/*---TRACE_READ---*/ pvr2_ioread_read"
- " id=%p request=%d result=%d",
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p request=%d result=%d",
cp,req_cnt,ret);
return ret;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 9a596a3a4c27..cd7bc18a1ba2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -357,8 +357,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "WARNING:"
- " Failed to classify the following standard(s): %.*s",
+ "WARNING: Failed to classify the following standard(s): %.*s",
bcnt,buf);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index 06fe63ced58c..d977976b8d91 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -116,7 +116,6 @@ static ssize_t show_type(struct device *class_dev,
}
pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",
cip->chptr, cip->ctl_id, name);
- if (!name) return -EINVAL;
return scnprintf(buf, PAGE_SIZE, "%s\n", name);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 2cc4d2b6f810..bbbe18d5275a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -949,8 +949,8 @@ static long pvr2_v4l2_ioctl(struct file *file,
if (ret < 0) {
if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld"
- " command was:", ret);
+ "pvr2_v4l2_do_ioctl failure, ret=%ld command was:",
+ret);
v4l_printk_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
}
} else {
@@ -1254,8 +1254,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
nr_ptr = video_nr;
if (!dip->stream) {
pr_err(KBUILD_MODNAME
- ": Failed to set up pvrusb2 v4l video dev"
- " due to missing stream instance\n");
+ ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
return;
}
break;
@@ -1272,8 +1271,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
break;
default:
/* Bail out (this should be impossible) */
- pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev"
- " due to unrecognized config\n");
+ pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev due to unrecognized config\n");
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
index 105123ab36aa..6fee367139aa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
@@ -91,9 +91,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
(hdw->input_val < 0) ||
(hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "*** WARNING *** subdev v4l2 set_input:"
- " Invalid routing scheme (%u)"
- " and/or input (%d)",
+ "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
sid, hdw->input_val);
return;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
index f1df94a2436f..7993983de5a6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
@@ -49,8 +49,7 @@ void pvr2_wm8775_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
input = 2;
break;
}
- pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775"
- " set_input(val=%d route=0x%x)",
+ pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775 set_input(val=%d route=0x%x)",
hdw->input_val, input);
sd->ops->audio->s_routing(sd, input, 0, 0);
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 3a1618580ed6..655cef39eb3d 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -39,7 +39,7 @@
/* Control functions for the cam; brightness, contrast, video mode, etc. */
#ifdef __KERNEL__
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#endif
#include <asm/errno.h>
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index ff657644b6b3..22420c14ac98 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -238,8 +238,8 @@ static void pwc_frame_complete(struct pwc_device *pdev)
} else {
/* Check for underflow first */
if (fbuf->filled < pdev->frame_total_size) {
- PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
- " discarded.\n", fbuf->filled);
+ PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes); discarded.\n",
+ fbuf->filled);
} else {
fbuf->vb.field = V4L2_FIELD_NONE;
fbuf->vb.sequence = pdev->vframe_count;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 3d987984602f..92f04db6bbae 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -406,8 +406,7 @@ static void pwc_vidioc_fill_fmt(struct v4l2_format *f,
f->fmt.pix.bytesperline = f->fmt.pix.width;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.width * 3 / 2;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
- "width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
+ PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
f->fmt.pix.width,
f->fmt.pix.height,
f->fmt.pix.bytesperline,
@@ -473,8 +472,7 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
pixelformat = f->fmt.pix.pixelformat;
- PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
- "format=%c%c%c%c\n",
+ PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d format=%c%c%c%c\n",
f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
(pixelformat)&255,
(pixelformat>>8)&255,
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index c2e25876e93b..a4dcaec31d02 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -604,8 +604,8 @@ static int smsusb_resume(struct usb_interface *intf)
intf->cur_altsetting->desc.
bInterfaceNumber, 0);
if (rc < 0) {
- printk(KERN_INFO "%s usb_set_interface failed, "
- "rc %d\n", __func__, rc);
+ printk(KERN_INFO "%s usb_set_interface failed, rc %d\n",
+ __func__, rc);
return rc;
}
}
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b014d7ad..fbccbb2eed9f 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
return 1;
@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
return 1;
@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
return 1;
- *val = (u8) tmpval;
+ *val = tmpval;
return 0;
}
@@ -391,8 +391,8 @@ int stk_sensor_init(struct stk_camera *dev)
}
stk_sensor_write_regvals(dev, ov_initvals);
msleep(10);
- STK_INFO("OmniVision sensor detected, id %02X%02X"
- " at address %x\n", idh, idl, SENSOR_ADDRESS);
+ STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n",
+ idh, idl, SENSOR_ADDRESS);
return 0;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae16291..a212248bc2a3 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
return 0;
}
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
{
struct usb_device *udev = dev->udev;
unsigned char *buf;
@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
sizeof(u8),
500);
if (ret >= 0)
- memcpy(value, buf, sizeof(u8));
+ *value = *buf;
kfree(buf);
return ret;
@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
static int stk_start_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i, ret;
- int value_116, value_117;
+ u8 value_116, value_117;
+
if (!is_present(dev))
return -ENODEV;
@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
static int stk_stop_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i;
if (is_present(dev)) {
stk_camera_read_reg(dev, 0x0100, &value);
@@ -372,8 +373,7 @@ static void stk_isoc_handler(struct urb *urb)
if (fb->v4lbuf.bytesused != 0
&& fb->v4lbuf.bytesused != dev->frame_size) {
(void) (printk_ratelimit() &&
- STK_ERROR("frame %d, "
- "bytesused=%d, skipping\n",
+ STK_ERROR("frame %d, bytesused=%d, skipping\n",
i, fb->v4lbuf.bytesused));
fb->v4lbuf.bytesused = 0;
fill = fb->buffer;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d9bfdd..92bb48e3c74e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@ struct stk_camera {
#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
int stk_sensor_init(struct stk_camera *);
int stk_sensor_configure(struct stk_camera *);
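
The stkwebcam hunks above narrow the register-read path from int to u8: with the old prototype only one byte of the caller's 4-byte int was ever written, so the upper bytes kept whatever the stack happened to hold unless callers zero-initialized. A standalone sketch of the hazard being removed (hypothetical values):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t reg = 0x5a;		/* byte returned by the device */
		int value = 0x12345678;		/* imagine stale stack contents */

		memcpy(&value, &reg, sizeof(uint8_t));	/* old style: writes 1 of 4 bytes */
		printf("0x%08x\n", value);		/* 0x1234565a on little-endian */

		uint8_t v8 = reg;			/* new style: widths match */
		printf("0x%02x\n", v8);			/* 0x5a */
		return 0;
	}
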
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index f16fbd1f9f51..422322541af6 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -58,9 +58,7 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
- "{{Trident,tm6000},"
- "{{Trident,tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 7c32353c59db..8d104e5c4be3 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -602,8 +602,8 @@ int tm6000_init(struct tm6000_core *dev)
for (i = 0; i < size; i++) {
rc = tm6000_set_reg(dev, tab[i].req, tab[i].reg, tab[i].val);
if (rc < 0) {
- printk(KERN_ERR "Error %i while setting req %d, "
- "reg %d to value %d\n", rc,
+ printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
+ rc,
tab[i].req, tab[i].reg, tab[i].val);
return rc;
}
@@ -761,9 +761,8 @@ int tm6000_tvaudio_set_mute(struct tm6000_core *dev, u8 mute)
if (dev->dev_type == TM6010)
tm6010_set_mute_sif(dev, mute);
else {
- printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
- " SIF audio inputs. Please check the %s"
- " configuration.\n", dev->name);
+ printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has SIF audio inputs. Please check the %s configuration.\n",
+ dev->name);
return -EINVAL;
}
break;
@@ -822,9 +821,8 @@ void tm6000_set_volume(struct tm6000_core *dev, int vol)
if (dev->dev_type == TM6010)
tm6010_set_volume_sif(dev, vol);
else
- printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
- " SIF audio inputs. Please check the %s"
- " configuration.\n", dev->name);
+ printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has SIF audio inputs. Please check the %s configuration.\n",
+ dev->name);
break;
case TM6000_AMUX_ADC1:
case TM6000_AMUX_ADC2:
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b210383b..70dbaec1219e 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -35,9 +35,7 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},"
- "{{Trident, tm6000},"
- "{{Trident, tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}");
static int debug;
@@ -292,13 +290,11 @@ static int register_dvb(struct tm6000_core *dev)
}
if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
- printk(KERN_ERR "tm6000: couldn't register "
- "frontend (xc3028)\n");
+ printk(KERN_ERR "tm6000: couldn't register frontend (xc3028)\n");
ret = -EINVAL;
goto frontend_err;
}
- printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
- "attached to frontend!\n");
+ printk(KERN_INFO "tm6000: XC2028/3028 asked to be attached to frontend!\n");
break;
}
case TUNER_XC5000: {
@@ -315,13 +311,11 @@ static int register_dvb(struct tm6000_core *dev)
}
if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) {
- printk(KERN_ERR "tm6000: couldn't register "
- "frontend (xc5000)\n");
+ printk(KERN_ERR "tm6000: couldn't register frontend (xc5000)\n");
ret = -EINVAL;
goto frontend_err;
}
- printk(KERN_INFO "tm6000: XC5000 asked to be "
- "attached to frontend!\n");
+ printk(KERN_INFO "tm6000: XC5000 asked to be attached to frontend!\n");
break;
}
}
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index c7e23e3dd75e..b01d3ee56e77 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -173,8 +173,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
* immediately after a 1 or 2 byte write to select
* a register. We cannot fulfil this request.
*/
- i2c_dprintk(2, " read without preceding write not"
- " supported");
+ i2c_dprintk(2, " read without preceding write not supported");
rc = -EOPNOTSUPP;
goto err;
} else if (i + 1 < num && msgs[i].len <= 2 &&
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index 93a4b2434b6e..4064a5e8fae1 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -464,8 +464,7 @@ static int tm6000_load_std(struct tm6000_core *dev, struct tm6000_reg_settings *
for (i = 0; set[i].req; i++) {
rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
if (rc < 0) {
- printk(KERN_ERR "Error %i while setting "
- "req %d, reg %d to value %d\n",
+ printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
rc, set[i].req, set[i].reg, set[i].value);
return rc;
}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index dee7e7d3d47d..d9f3fa5db8dd 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -615,8 +615,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
return -ENOMEM;
}
- dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets"
- " (%d bytes) of %d bytes each to handle %u size\n",
+ dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets (%d bytes) of %d bytes each to handle %u size\n",
max_packets, num_bufs, sb_size,
dev->isoc_in.maxsize, size);
@@ -939,8 +938,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt) {
- dprintk(dev, 2, "Fourcc format (0x%08x)"
- " invalid.\n", f->fmt.pix.pixelformat);
+ dprintk(dev, 2, "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
return -EINVAL;
}
@@ -1366,14 +1365,13 @@ static int __tm6000_open(struct file *file)
fh->width = dev->width;
fh->height = dev->height;
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
- "dev->vidq=0x%08lx\n",
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq);
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "queued=%d\n", list_empty(&dev->vidq.queued));
- dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "active=%d\n", list_empty(&dev->vidq.active));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty queued=%d\n",
+ list_empty(&dev->vidq.queued));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty active=%d\n",
+ list_empty(&dev->vidq.active));
/* initialize hardware on analog mode */
rc = tm6000_init_analog_mode(dev);
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index d52d4a8d39ad..361e40b56045 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -767,8 +767,7 @@ static void ttusb_iso_irq(struct urb *urb)
for (i = 0; i < urb->number_of_packets; ++i) {
numpkt++;
if (time_after_eq(jiffies, lastj + HZ)) {
- dprintk("frames/s: %lu (ts: %d, stuff %d, "
- "sec: %d, invalid: %d, all: %d)\n",
+ dprintk("frames/s: %lu (ts: %d, stuff %d, sec: %d, invalid: %d, all: %d)\n",
numpkt * HZ / (jiffies - lastj),
numts, numstuff, numsec, numinvalid,
numts + numstuff + numsec + numinvalid);
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e7671a3a1e4..fc0219f1b7df 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -36,7 +36,6 @@
#include "dmxdev.h"
#include "dvb_demux.h"
-#include "dvb_filter.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "ttusbdecfe.h"
@@ -92,6 +91,15 @@ enum ttusb_dec_interface {
TTUSB_DEC_INTERFACE_OUT
};
+typedef int (dvb_filter_pes2ts_cb_t) (void *, unsigned char *);
+
+struct dvb_filter_pes2ts {
+ unsigned char buf[188];
+ unsigned char cc;
+ dvb_filter_pes2ts_cb_t *cb;
+ void *priv;
+};
+
struct ttusb_dec {
enum ttusb_dec_model model;
char *model_name;
@@ -201,6 +209,54 @@ static u16 rc_keys[] = {
KEY_RADIO
};
+static void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts,
+ unsigned short pid,
+ dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+ unsigned char *buf=p2ts->buf;
+
+ buf[0]=0x47;
+ buf[1]=(pid>>8);
+ buf[2]=pid&0xff;
+ p2ts->cc=0;
+ p2ts->cb=cb;
+ p2ts->priv=priv;
+}
+
+static int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts,
+ unsigned char *pes, int len, int payload_start)
+{
+ unsigned char *buf=p2ts->buf;
+ int ret=0, rest;
+
+ //len=6+((pes[4]<<8)|pes[5]);
+
+ if (payload_start)
+ buf[1]|=0x40;
+ else
+ buf[1]&=~0x40;
+ while (len>=184) {
+ buf[3]=0x10|((p2ts->cc++)&0x0f);
+ memcpy(buf+4, pes, 184);
+ if ((ret=p2ts->cb(p2ts->priv, buf)))
+ return ret;
+ len-=184; pes+=184;
+ buf[1]&=~0x40;
+ }
+ if (!len)
+ return 0;
+ buf[3]=0x30|((p2ts->cc++)&0x0f);
+ rest=183-len;
+ if (rest) {
+ buf[5]=0x00;
+ if (rest-1)
+ memset(buf+6, 0xff, rest-1);
+ }
+ buf[4]=rest;
+ memcpy(buf+5+rest, pes, len);
+ return p2ts->cb(p2ts->priv, buf);
+}
+
static void ttusb_dec_set_model(struct ttusb_dec *dec,
enum ttusb_dec_model model);
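
The block added above open-codes the PES-to-TS packetizer that used to come from dvb_filter.h (dropped from the includes earlier in this diff): each 188-byte TS packet gets the 0x47 sync byte, the PID, the payload_unit_start flag (0x40 in byte 1) on the first packet, a 4-bit continuity counter in byte 3, and an adaptation field that pads out the final short packet. A usage sketch built only from the signatures shown above (the callback is hypothetical):

	/* hypothetical sink: receives each finished 188-byte TS packet */
	static int send_ts_packet(void *priv, unsigned char *ts)
	{
		/* e.g. feed the 188 bytes to the demux here */
		return 0;	/* non-zero aborts packetization */
	}

	static struct dvb_filter_pes2ts p2ts;

	dvb_filter_pes2ts_init(&p2ts, 0x0100 /* PID */, send_ts_packet, NULL);
	dvb_filter_pes2ts(&p2ts, pes_buf, pes_len, 1 /* payload start */);
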
@@ -273,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
int param_length, const u8 params[],
int *result_length, u8 cmd_result[])
{
- int result, actual_len, i;
+ int result, actual_len;
u8 *b;
dprintk("%s\n", __func__);
@@ -297,10 +353,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
memcpy(&b[4], params, param_length);
if (debug) {
- printk("%s: command: ", __func__);
- for (i = 0; i < param_length + 4; i++)
- printk("0x%02X ", b[i]);
- printk("\n");
+ printk(KERN_DEBUG "%s: command: %*ph\n",
+ __func__, param_length, b);
}
result = usb_bulk_msg(dec->udev, dec->command_pipe, b,
@@ -325,10 +379,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
return result;
} else {
if (debug) {
- printk("%s: result: ", __func__);
- for (i = 0; i < actual_len; i++)
- printk("0x%02X ", b[i]);
- printk("\n");
+ printk(KERN_DEBUG "%s: result: %*ph\n",
+ __func__, actual_len, b);
}
if (result_length)
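
The two hunks above replace hand-rolled hex-dump loops with the printk %*ph extension, which prints a small buffer (up to 64 bytes) as space-separated hex, taking the byte count as the field-width argument. A minimal sketch:

	#include <linux/printk.h>

	u8 cmd[4] = { 0xaa, 0x01, 0x02, 0x03 };

	printk(KERN_DEBUG "command: %*ph\n", 4, cmd);	/* "command: aa 01 02 03" */
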
@@ -652,8 +704,8 @@ static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
dec->packet_payload_length = 2;
dec->packet_state = 7;
} else {
- printk("%s: unknown packet type: "
- "%02x%02x\n", __func__,
+ printk("%s: unknown packet type: %02x%02x\n",
+ __func__,
dec->packet[0], dec->packet[1]);
dec->packet_state = 0;
}
@@ -905,8 +957,8 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
for (i = 0; i < ISO_BUF_COUNT; i++) {
if ((result = usb_submit_urb(dec->iso_urb[i],
GFP_ATOMIC))) {
- printk("%s: failed urb submission %d: "
- "error %d\n", __func__, i, result);
+ printk("%s: failed urb submission %d: error %d\n",
+ __func__, i, result);
while (i) {
usb_kill_urb(dec->iso_urb[i - 1]);
@@ -1319,8 +1371,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
memcpy(&tmp, &firmware[56], 4);
crc32_check = ntohl(tmp);
if (crc32_csum != crc32_check) {
- printk("%s: crc32 check of DSP code failed (calculated "
- "0x%08x != 0x%08x in file), file invalid.\n",
+ printk("%s: crc32 check of DSP code failed (calculated 0x%08x != 0x%08x in file), file invalid.\n",
__func__, crc32_csum, crc32_check);
release_firmware(fw_entry);
return -ENOENT;
@@ -1397,11 +1448,9 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
if (!mode) {
if (version == 0xABCDEFAB)
- printk(KERN_INFO "ttusb_dec: no version "
- "info in Firmware\n");
+ printk(KERN_INFO "ttusb_dec: no version info in Firmware\n");
else
- printk(KERN_INFO "ttusb_dec: Firmware "
- "%x.%02x%c%c\n",
+ printk(KERN_INFO "ttusb_dec: Firmware %x.%02x%c%c\n",
version >> 24, (version >> 16) & 0xff,
(version >> 8) & 0xff, version & 0xff);
@@ -1425,8 +1474,7 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
ttusb_dec_set_model(dec, TTUSB_DEC2540T);
break;
default:
- printk(KERN_ERR "%s: unknown model returned "
- "by firmware (%08x) - please report\n",
+ printk(KERN_ERR "%s: unknown model returned by firmware (%08x) - please report\n",
__func__, model);
return -ENOENT;
}
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 8781335ab92f..2d9444905fdb 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -205,7 +205,7 @@ static void ttusbdecfe_release(struct dvb_frontend* fe)
kfree(state);
}
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* config)
{
@@ -225,7 +225,7 @@ struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* conf
return &state->frontend;
}
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* config)
{
@@ -247,7 +247,7 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
return &state->frontend;
}
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "TechnoTrend/Hauppauge DEC2000-t Frontend",
@@ -270,7 +270,7 @@ static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.read_status = ttusbdecfe_dvbt_read_status,
};
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "TechnoTrend/Hauppauge DEC3000-s Frontend",
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 6cbe4a245c9f..d3b6d3dfaa09 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Lubomir Rintel
+ * Copyright (c) 2013,2016 Lubomir Rintel
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
if (ret)
return ret;
+ ret = v4l2_ctrl_handler_setup(&usbtv->ctrl);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -696,11 +700,91 @@ static const struct vb2_ops usbtv_vb2_ops = {
.stop_streaming = usbtv_stop_streaming,
};
+static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct usbtv *usbtv = container_of(ctrl->handler, struct usbtv,
+ ctrl);
+ u8 *data;
+ u16 index, size;
+ int ret;
+
+ data = kmalloc(3, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /*
+ * Read in the current brightness/contrast registers. We need them
+ * both, because the values are for some reason interleaved.
+ */
+ if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
+ ret = usb_control_msg(usbtv->udev,
+ usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
+ if (ret < 0)
+ goto error;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ index = USBTV_BASE + 0x0244;
+ size = 3;
+ data[0] &= 0xf0;
+ data[0] |= (ctrl->val >> 8) & 0xf;
+ data[2] = ctrl->val & 0xff;
+ break;
+ case V4L2_CID_CONTRAST:
+ index = USBTV_BASE + 0x0244;
+ size = 3;
+ data[0] &= 0x0f;
+ data[0] |= (ctrl->val >> 4) & 0xf0;
+ data[1] = ctrl->val & 0xff;
+ break;
+ case V4L2_CID_SATURATION:
+ index = USBTV_BASE + 0x0242;
+ data[0] = ctrl->val >> 8;
+ data[1] = ctrl->val & 0xff;
+ size = 2;
+ break;
+ case V4L2_CID_HUE:
+ index = USBTV_BASE + 0x0240;
+ size = 2;
+ if (ctrl->val > 0) {
+ data[0] = 0x92 + (ctrl->val >> 8);
+ data[1] = ctrl->val & 0xff;
+ } else {
+ data[0] = 0x82 + (-ctrl->val >> 8);
+ data[1] = -ctrl->val & 0xff;
+ }
+ break;
+ default:
+ kfree(data);
+ return -EINVAL;
+ }
+
+ ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
+ USBTV_CONTROL_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, (void *)data, size, 0);
+
+error:
+ if (ret < 0)
+ dev_warn(usbtv->dev, "Failed to submit a control request.\n");
+
+ kfree(data);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops usbtv_ctrl_ops = {
+ .s_ctrl = usbtv_s_ctrl,
+};
+
static void usbtv_release(struct v4l2_device *v4l2_dev)
{
struct usbtv *usbtv = container_of(v4l2_dev, struct usbtv, v4l2_dev);
v4l2_device_unregister(&usbtv->v4l2_dev);
+ v4l2_ctrl_handler_free(&usbtv->ctrl);
vb2_queue_release(&usbtv->vb2q);
kfree(usbtv);
}
@@ -731,7 +815,24 @@ int usbtv_video_init(struct usbtv *usbtv)
return ret;
}
+ /* controls */
+ v4l2_ctrl_handler_init(&usbtv->ctrl, 4);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 0x3ff, 1, 0x1d0);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 0x3ff, 1, 0x1c0);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 0x3ff, 1, 0x200);
+ v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+ V4L2_CID_HUE, -0xdff, 0xdff, 1, 0x000);
+ ret = usbtv->ctrl.error;
+ if (ret < 0) {
+ dev_warn(usbtv->dev, "Could not initialize controls\n");
+ goto ctrl_fail;
+ }
+
/* v4l2 structure */
+ usbtv->v4l2_dev.ctrl_handler = &usbtv->ctrl;
usbtv->v4l2_dev.release = usbtv_release;
ret = v4l2_device_register(usbtv->dev, &usbtv->v4l2_dev);
if (ret < 0) {
@@ -760,6 +861,8 @@ int usbtv_video_init(struct usbtv *usbtv)
vdev_fail:
v4l2_device_unregister(&usbtv->v4l2_dev);
v4l2_fail:
+ctrl_fail:
+ v4l2_ctrl_handler_free(&usbtv->ctrl);
vb2_queue_release(&usbtv->vb2q);
return ret;
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 011f9fdc77a9..0231e449877e 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -38,6 +38,7 @@
#include <linux/usb.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
@@ -45,6 +46,7 @@
#define USBTV_VIDEO_ENDP 0x81
#define USBTV_AUDIO_ENDP 0x83
#define USBTV_BASE 0xc000
+#define USBTV_CONTROL_REG 11
#define USBTV_REQUEST_REG 12
/* Number of concurrent isochronous urbs submitted.
@@ -87,6 +89,7 @@ struct usbtv {
/* video */
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl;
struct video_device vdev;
struct vb2_queue vb2q;
struct mutex v4l2_lock;
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index c23bf73a68ea..bf041a9e69db 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1656,8 +1656,8 @@ static int usbvision_set_video_format(struct usb_usbvision *usbvision, int forma
(__u16) USBVISION_FILT_CONT, value, 2, HZ);
if (rc < 0) {
- printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
}
usbvision->isoc_mode = format;
return rc;
@@ -1890,8 +1890,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
(__u16) USBVISION_INTRA_CYC, value, 5, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
@@ -1921,8 +1921,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
(__u16) USBVISION_PCM_THR1, value, 6, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
}
return rc;
}
@@ -1960,8 +1960,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
@@ -2026,8 +2026,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
(__u16) USBVISION_LXSIZE_I, value, 8, HZ);
if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
- "reconnect or reload driver.\n", proc, rc);
+ printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+ proc, rc);
return rc;
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2ee7a2..a7529196c327 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1456,8 +1456,8 @@ static int usbvision_probe(struct usb_interface *intf,
}
if (interface->desc.bNumEndpoints < 2) {
- dev_err(&intf->dev, "interface %d has %d endpoints, but must"
- " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+ dev_err(&intf->dev, "interface %d has %d endpoints, but must have minimum 2\n",
+ ifnum, interface->desc.bNumEndpoints);
ret = -ENODEV;
goto err_usb;
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 302e284a95eb..04bf35063c4c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -168,6 +168,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_RW10,
.fcc = V4L2_PIX_FMT_SRGGB10P,
},
+ {
+ .name = "Bayer 16-bit (SBGGR16)",
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .name = "Bayer 16-bit (SGBRG16)",
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .name = "Bayer 16-bit (SRGGB16)",
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .name = "Bayer 16-bit (SGRBG16)",
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
};
/* ------------------------------------------------------------------------
@@ -1309,7 +1329,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_VC_EXTENSION_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- XU %d", entity->id);
+ printk(KERN_CONT " <- XU %d", entity->id);
if (entity->bNrInPins != 1) {
uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has more "
@@ -1321,7 +1341,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_VC_PROCESSING_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- PU %d", entity->id);
+ printk(KERN_CONT " <- PU %d", entity->id);
if (chain->processing != NULL) {
uvc_trace(UVC_TRACE_DESCR, "Found multiple "
@@ -1334,7 +1354,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_VC_SELECTOR_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- SU %d", entity->id);
+ printk(KERN_CONT " <- SU %d", entity->id);
/* Single-input selector units are ignored. */
if (entity->bNrInPins == 1)
@@ -1353,7 +1373,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_ITT_CAMERA:
case UVC_ITT_MEDIA_TRANSPORT_INPUT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT %d\n", entity->id);
+ printk(KERN_CONT " <- IT %d\n", entity->id);
break;
@@ -1361,17 +1381,17 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
case UVC_OTT_DISPLAY:
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" OT %d", entity->id);
+ printk(KERN_CONT " OT %d", entity->id);
break;
case UVC_TT_STREAMING:
if (UVC_ENTITY_IS_ITERM(entity)) {
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT %d\n", entity->id);
+ printk(KERN_CONT " <- IT %d\n", entity->id);
} else {
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" OT %d", entity->id);
+ printk(KERN_CONT " OT %d", entity->id);
}
break;
@@ -1416,9 +1436,9 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
list_add_tail(&forward->chain, &chain->entities);
if (uvc_trace_param & UVC_TRACE_PROBE) {
if (!found)
- printk(" (->");
+ printk(KERN_CONT " (->");
- printk(" XU %d", forward->id);
+ printk(KERN_CONT " XU %d", forward->id);
found = 1;
}
break;
@@ -1436,16 +1456,16 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
list_add_tail(&forward->chain, &chain->entities);
if (uvc_trace_param & UVC_TRACE_PROBE) {
if (!found)
- printk(" (->");
+ printk(KERN_CONT " (->");
- printk(" OT %d", forward->id);
+ printk(KERN_CONT " OT %d", forward->id);
found = 1;
}
break;
}
}
if (found)
- printk(")");
+ printk(KERN_CONT ")");
return 0;
}
@@ -1471,7 +1491,7 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" <- IT");
+ printk(KERN_CONT " <- IT");
chain->selector = entity;
for (i = 0; i < entity->bNrInPins; ++i) {
@@ -1485,14 +1505,14 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk(" %d", term->id);
+ printk(KERN_CONT " %d", term->id);
list_add_tail(&term->chain, &chain->entities);
uvc_scan_chain_forward(chain, term, entity);
}
if (uvc_trace_param & UVC_TRACE_PROBE)
- printk("\n");
+ printk(KERN_CONT "\n");
id = 0;
break;
@@ -1595,6 +1615,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
return buffer;
}
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&chain->entities);
+ mutex_init(&chain->ctrl_mutex);
+ chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, it turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ * - Acer Integrated Camera (5986:055a)
+ * - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ struct uvc_entity *iterm = NULL;
+ struct uvc_entity *oterm = NULL;
+ struct uvc_entity *entity;
+ struct uvc_entity *prev;
+
+ /*
+ * Start by locating the input and output terminals. We only support
+ * devices with exactly one of each for now.
+ */
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (UVC_ENTITY_IS_ITERM(entity)) {
+ if (iterm)
+ return -EINVAL;
+ iterm = entity;
+ }
+
+ if (UVC_ENTITY_IS_OTERM(entity)) {
+ if (oterm)
+ return -EINVAL;
+ oterm = entity;
+ }
+ }
+
+ if (iterm == NULL || oterm == NULL)
+ return -EINVAL;
+
+ /* Allocate the chain and fill it. */
+ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+ if (uvc_scan_chain_entity(chain, oterm) < 0)
+ goto error;
+
+ prev = oterm;
+
+ /*
+ * Add all Processing and Extension Units with two pads. The order
+ * doesn't matter much; use reverse list traversal to connect units in
+ * UVC descriptor order as we build the chain from output to input. This
+ * leads to units appearing in the order meant by the manufacturer for
+ * the cameras known to require this heuristic.
+ */
+ list_for_each_entry_reverse(entity, &dev->entities, list) {
+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
+ entity->type != UVC_VC_EXTENSION_UNIT)
+ continue;
+
+ if (entity->num_pads != 2)
+ continue;
+
+ if (uvc_scan_chain_entity(chain, entity) < 0)
+ goto error;
+
+ prev->baSourceID[0] = entity->id;
+ prev = entity;
+ }
+
+ if (uvc_scan_chain_entity(chain, iterm) < 0)
+ goto error;
+
+ prev->baSourceID[0] = iterm->id;
+
+ list_add_tail(&chain->list, &dev->chains);
+
+ uvc_trace(UVC_TRACE_PROBE,
+ "Found a video chain by fallback heuristic (%s).\n",
+ uvc_print_chain(chain));
+
+ return 0;
+
+error:
+ kfree(chain);
+ return -EINVAL;
+}
+
/*
* Scan the device for video chains and register video devices.
*
@@ -1617,15 +1745,10 @@ static int uvc_scan_device(struct uvc_device *dev)
if (term->chain.next || term->chain.prev)
continue;
- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ chain = uvc_alloc_chain(dev);
if (chain == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&chain->entities);
- mutex_init(&chain->ctrl_mutex);
- chain->dev = dev;
- v4l2_prio_init(&chain->prio);
-
term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1762,9 @@ static int uvc_scan_device(struct uvc_device *dev)
list_add_tail(&chain->list, &dev->chains);
}
+ if (list_empty(&dev->chains))
+ uvc_scan_fallback(dev);
+
if (list_empty(&dev->chains)) {
uvc_printk(KERN_INFO, "No valid video chain found.\n");
return -1;
@@ -2564,6 +2690,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_FORCE_Y8 },
+ /* Oculus VR Rift Sensor */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x2833,
+ .idProduct = 0x0211,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FORCE_Y8 },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 05eed4be25df..3e7e283a44a8 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -66,19 +66,14 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
if (xmap->menu_count == 0 ||
xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
ret = -EINVAL;
- goto done;
+ goto free_map;
}
size = xmap->menu_count * sizeof(*map->menu_info);
- map->menu_info = kmalloc(size, GFP_KERNEL);
- if (map->menu_info == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(map->menu_info, xmap->menu_info, size)) {
- ret = -EFAULT;
- goto done;
+ map->menu_info = memdup_user(xmap->menu_info, size);
+ if (IS_ERR(map->menu_info)) {
+ ret = PTR_ERR(map->menu_info);
+ goto free_map;
}
map->menu_count = xmap->menu_count;
@@ -88,13 +83,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
"%u.\n", xmap->v4l2_type);
ret = -ENOTTY;
- goto done;
+ goto free_map;
}
ret = uvc_ctrl_add_mapping(chain, map);
-done:
kfree(map->menu_info);
+free_map:
kfree(map);
return ret;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 7e4d3eea371b..3d6cc62f3cd2 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -106,6 +106,18 @@
#define UVC_GUID_FORMAT_RGGB \
{ 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+ { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+ { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+ { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+ { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_RGBP \
{ 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db85723..3950708cbb32 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -633,8 +633,7 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
} else {
if (frm->cur_size + purb->actual_length > MAX_FRAME_SIZE) {
dev_info(&cam->udev->dev,
- "%s: buffer (%d bytes) too small to hold "
- "frame data. Discarding frame data.\n",
+ "%s: buffer (%d bytes) too small to hold frame data. Discarding frame data.\n",
__func__, MAX_FRAME_SIZE);
} else {
pdest += frm->cur_size;
@@ -1373,8 +1372,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
&cam->buffer.frame[i], i,
cam->buffer.frame[i].lpvbits);
if (cam->buffer.frame[i].lpvbits == NULL) {
- printk(KERN_INFO KBUILD_MODNAME ": out of memory. "
- "Using less frames\n");
+ printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using less frames\n");
break;
}
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 367523a3c774..6b1b78ff1417 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -6,6 +6,7 @@
config VIDEO_V4L2
tristate
depends on (I2C || I2C=n) && VIDEO_DEV
+ select RATIONAL
default (I2C || I2C=n) && VIDEO_DEV
config VIDEO_ADV_DEBUG
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 731487be5baa..05b5c6652cfa 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -84,30 +84,16 @@ static const struct v4l2_subdev_ops tuner_ops;
* Debug macros
*/
-#define tuner_warn(fmt, arg...) do { \
- printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
-
-#define tuner_dbg(fmt, arg...) do { \
- if (tuner_debug) \
- printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX, \
- i2c_adapter_id(t->i2c->adapter), \
- t->i2c->addr, ##arg); \
- } while (0)
+#undef pr_fmt
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \
+ i2c_adapter_id(t->i2c->adapter), t->i2c->addr
+
+
+#define dprintk(fmt, arg...) do { \
+ if (tuner_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \
+} while (0)
/*
* Internal struct used inside the driver
@@ -208,7 +194,7 @@ static void fe_set_params(struct dvb_frontend *fe,
struct tuner *t = fe->analog_demod_priv;
if (NULL == fe_tuner_ops->set_analog_params) {
- tuner_warn("Tuner frontend module has no way to set freq\n");
+ pr_warn("Tuner frontend module has no way to set freq\n");
return;
}
fe_tuner_ops->set_analog_params(fe, params);
@@ -230,7 +216,7 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (fe_tuner_ops->set_config)
return fe_tuner_ops->set_config(fe, priv_cfg);
- tuner_warn("Tuner frontend module has no way to set config\n");
+ pr_warn("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -273,14 +259,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
int tune_now = 1;
if (type == UNSET || type == TUNER_ABSENT) {
- tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
+ dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
return;
}
t->type = type;
t->config = new_config;
if (tuner_callback != NULL) {
- tuner_dbg("defining GPIO callback\n");
+ dprintk("defining GPIO callback\n");
t->fe.callback = tuner_callback;
}
@@ -442,7 +428,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
t->sd.entity.name = t->name;
#endif
- tuner_dbg("type set to %s\n", t->name);
+ dprintk("type set to %s\n", t->name);
t->mode_mask = new_mode_mask;
@@ -459,13 +445,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
set_tv_freq(c, t->tv_freq);
}
- tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+ dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
attach_failed:
- tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+ dprintk("Tuner attach for type = %d failed.\n", t->type);
t->type = TUNER_ABSENT;
return;
@@ -491,7 +477,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
struct tuner *t = to_tuner(sd);
struct i2c_client *c = v4l2_get_subdevdata(sd);
- tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+ dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
tun_setup->type,
tun_setup->addr,
tun_setup->mode_mask,
@@ -503,8 +489,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
set_type(c, tun_setup->type, tun_setup->mode_mask,
tun_setup->config, tun_setup->tuner_callback);
} else
- tuner_dbg("set addr discarded for type %i, mask %x. "
- "Asked to change tuner at addr 0x%02x, with mask %x\n",
+ dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
t->type, t->mode_mask,
tun_setup->addr, tun_setup->mode_mask);
@@ -534,7 +519,7 @@ static int tuner_s_config(struct v4l2_subdev *sd,
return 0;
}
- tuner_dbg("Tuner frontend module has no way to set config\n");
+ dprintk("Tuner frontend module has no way to set config\n");
return 0;
}
@@ -618,14 +603,12 @@ static int tuner_probe(struct i2c_client *client,
if (show_i2c) {
unsigned char buffer[16];
- int i, rc;
+ int rc;
memset(buffer, 0, sizeof(buffer));
rc = i2c_master_recv(client, buffer, sizeof(buffer));
- tuner_info("I2C RECV = ");
- for (i = 0; i < rc; i++)
- printk(KERN_CONT "%02x ", buffer[i]);
- printk("\n");
+ if (rc >= 0)
+ pr_info("I2C RECV = %*ph\n", rc, buffer);
}
/* autodetection code based on the i2c addr */
@@ -653,7 +636,7 @@ static int tuner_probe(struct i2c_client *client,
since it can be tda9887*/
if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
t->i2c->addr) >= 0) {
- tuner_dbg("tda829x detected\n");
+ dprintk("tda829x detected\n");
} else {
/* Default is being tda9887 */
t->type = TUNER_TDA9887;
@@ -690,7 +673,7 @@ static int tuner_probe(struct i2c_client *client,
t->mode_mask = T_ANALOG_TV;
if (radio == NULL)
t->mode_mask |= T_RADIO;
- tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
+ dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
}
/* Should be just before return */
@@ -719,7 +702,7 @@ register_client:
}
if (ret < 0) {
- tuner_err("failed to initialize media entity!\n");
+ pr_err("failed to initialize media entity!\n");
kfree(t);
return ret;
}
@@ -732,7 +715,7 @@ register_client:
set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
list_add_tail(&t->list, &tuner_list);
- tuner_info("Tuner %d found with type(s)%s%s.\n",
+ pr_info("Tuner %d found with type(s)%s%s.\n",
t->type,
t->mode_mask & T_RADIO ? " Radio" : "",
t->mode_mask & T_ANALOG_TV ? " TV" : "");
@@ -809,15 +792,15 @@ static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
if (mode != t->mode) {
if (check_mode(t, mode) == -EINVAL) {
- tuner_dbg("Tuner doesn't support mode %d. "
- "Putting tuner to sleep\n", mode);
+ dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
+ mode);
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
return -EINVAL;
}
t->mode = mode;
- tuner_dbg("Changing to mode %d\n", mode);
+ dprintk("Changing to mode %d\n", mode);
}
return 0;
}
@@ -864,15 +847,15 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("Tuner has no way to set tv freq\n");
+ pr_warn("Tuner has no way to set tv freq\n");
return;
}
if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
- tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
freq / 16, freq % 16 * 100 / 16, tv_range[0],
tv_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -883,7 +866,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
freq = tv_range[1] * 16;
}
params.frequency = freq;
- tuner_dbg("tv freq set to %d.%02d\n",
+ dprintk("tv freq set to %d.%02d\n",
freq / 16, freq % 16 * 100 / 16);
t->tv_freq = freq;
t->standby = false;
@@ -933,7 +916,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_PAL_Nc;
return V4L2_STD_PAL_N;
default:
- tuner_warn("pal= argument not recognised\n");
+ pr_warn("pal= argument not recognised\n");
break;
}
}
@@ -959,7 +942,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
return V4L2_STD_SECAM_LC;
return V4L2_STD_SECAM_L;
default:
- tuner_warn("secam= argument not recognised\n");
+ pr_warn("secam= argument not recognised\n");
break;
}
}
@@ -976,7 +959,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
case 'K':
return V4L2_STD_NTSC_M_KR;
default:
- tuner_info("ntsc= argument not recognised\n");
+ pr_info("ntsc= argument not recognised\n");
break;
}
}
@@ -1005,15 +988,15 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
};
if (t->type == UNSET) {
- tuner_warn("tuner type not set\n");
+ pr_warn("tuner type not set\n");
return;
}
if (NULL == analog_ops->set_params) {
- tuner_warn("tuner has no way to set radio frequency\n");
+ pr_warn("tuner has no way to set radio frequency\n");
return;
}
if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
- tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
+ dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
freq / 16000, freq % 16000 * 100 / 16000,
radio_range[0], radio_range[1]);
/* V4L2 spec: if the freq is not possible then the closest
@@ -1024,7 +1007,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
freq = radio_range[1] * 16000;
}
params.frequency = freq;
- tuner_dbg("radio freq set to %d.%02d\n",
+ dprintk("radio freq set to %d.%02d\n",
freq / 16000, freq % 16000 * 100 / 16000);
t->radio_freq = freq;
t->standby = false;
@@ -1075,10 +1058,10 @@ static void tuner_status(struct dvb_frontend *fe)
freq = t->tv_freq / 16;
freq_fraction = (t->tv_freq % 16) * 100 / 16;
}
- tuner_info("Tuner mode: %s%s\n", p,
+ pr_info("Tuner mode: %s%s\n", p,
t->standby ? " on standby mode" : "");
- tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
- tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+ pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
if (fe_tuner_ops->get_status) {
@@ -1086,15 +1069,15 @@ static void tuner_status(struct dvb_frontend *fe)
fe_tuner_ops->get_status(&t->fe, &tuner_status);
if (tuner_status & TUNER_STATUS_LOCKED)
- tuner_info("Tuner is locked.\n");
+ pr_info("Tuner is locked.\n");
if (tuner_status & TUNER_STATUS_STEREO)
- tuner_info("Stereo: yes\n");
+ pr_info("Stereo: yes\n");
}
if (analog_ops->has_signal) {
u16 signal;
if (!analog_ops->has_signal(fe, &signal))
- tuner_info("Signal strength: %hu\n", signal);
+ pr_info("Signal strength: %hu\n", signal);
}
}
@@ -1127,13 +1110,13 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
if (on) {
if (t->standby && set_mode(t, t->mode) == 0) {
- tuner_dbg("Waking up tuner\n");
+ dprintk("Waking up tuner\n");
set_freq(t, 0);
}
return 0;
}
- tuner_dbg("Putting tuner to sleep\n");
+ dprintk("Putting tuner to sleep\n");
t->standby = true;
if (analog_ops->standby)
analog_ops->standby(&t->fe);
@@ -1149,7 +1132,7 @@ static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
t->std = tuner_fixup_std(t, std);
if (t->std != std)
- tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ dprintk("Fixup standard %llx to %llx\n", std, t->std);
set_freq(t, 0);
return 0;
}
@@ -1298,7 +1281,7 @@ static int tuner_suspend(struct device *dev)
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- tuner_dbg("suspend\n");
+ dprintk("suspend\n");
if (t->fe.ops.tuner_ops.suspend)
t->fe.ops.tuner_ops.suspend(&t->fe);
@@ -1313,7 +1296,7 @@ static int tuner_resume(struct device *dev)
struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
- tuner_dbg("resume\n");
+ dprintk("resume\n");
if (t->fe.ops.tuner_ops.resume)
t->fe.ops.tuner_ops.resume(&t->fe);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 57cfe26a393f..a5ea1f517291 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -54,7 +54,7 @@
#if defined(CONFIG_SPI)
#include <linux/spi/spi.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/div64.h>
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd68a6d..eac9565dc3d8 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -409,7 +409,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
- int num_planes;
int ret;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
@@ -429,12 +428,15 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
- if (num_planes == 0) {
+ unsigned int num_planes;
+
+ if (kp->length == 0) {
kp->m.planes = NULL;
/* num_planes == 0 is legal, e.g. when userspace doesn't
* need planes array on DQBUF*/
return 0;
+ } else if (kp->length > VIDEO_MAX_PLANES) {
+ return -EINVAL;
}
if (get_user(p, &up->m.planes))
@@ -442,16 +444,16 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
uplane32 = compat_ptr(p);
if (!access_ok(VERIFY_READ, uplane32,
- num_planes * sizeof(struct v4l2_plane32)))
+ kp->length * sizeof(struct v4l2_plane32)))
return -EFAULT;
/* We don't really care if userspace decides to kill itself
* by passing a very big num_planes value */
- uplane = compat_alloc_user_space(num_planes *
- sizeof(struct v4l2_plane));
+ uplane = compat_alloc_user_space(kp->length *
+ sizeof(struct v4l2_plane));
kp->m.planes = (__force struct v4l2_plane *)uplane;
- while (--num_planes >= 0) {
+ for (num_planes = 0; num_planes < kp->length; num_planes++) {
ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
if (ret)
return ret;
@@ -665,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
{
struct v4l2_ext_control32 __user *ucontrols;
struct v4l2_ext_control __user *kcontrols;
- int n;
+ unsigned int n;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
@@ -675,20 +677,22 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
copy_from_user(kp->reserved, up->reserved,
sizeof(kp->reserved)))
return -EFAULT;
- n = kp->count;
- if (n == 0) {
+ if (kp->count == 0) {
kp->controls = NULL;
return 0;
+ } else if (kp->count > V4L2_CID_MAX_CTRLS) {
+ return -EINVAL;
}
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
if (!access_ok(VERIFY_READ, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ kp->count * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
- kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+ kcontrols = compat_alloc_user_space(kp->count *
+ sizeof(struct v4l2_ext_control));
kp->controls = (__force struct v4l2_ext_control *)kcontrols;
- while (--n >= 0) {
+ for (n = 0; n < kp->count; n++) {
u32 id;
if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index adc2147fcff7..47001e25fd9e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -885,6 +885,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_LINK_FREQ: return "Link Frequency";
case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
/* DV controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1058,6 +1059,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_RX_RGB_RANGE:
case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_DEINTERLACING_MODE:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
case V4L2_CID_DETECT_MD_MODE:
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 8be561ab2615..fa2124cb31bd 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 730a7c392c1d..5c8c49d240d1 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/rational.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>
@@ -224,6 +225,24 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
}
EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
+{
+ unsigned int i;
+
+ for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt =
+ &v4l2_dv_timings_presets[i].bt;
+
+ if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
+ bt->cea861_vic == vic) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
+
/**
* v4l2_match_dv_timings - check if two timings match
* @t1 - compare this v4l2_dv_timings struct...
@@ -306,7 +325,8 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->il_vsync, bt->il_vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
- pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s\n", dev_prefix, bt->flags,
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
+ dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
@@ -320,16 +340,51 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
" CE_VIDEO" : "",
(bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
- " FIRST_FIELD_EXTRA_LINE" : "");
+ " FIRST_FIELD_EXTRA_LINE" : "",
+ (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
+ " HAS_PICTURE_ASPECT" : "",
+ (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
+ " HAS_CEA861_VIC" : "",
+ (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
+ " HAS_HDMI_VIC" : "");
pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
(bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
(bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
(bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
+ if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
+ pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
+ bt->picture_aspect.numerator,
+ bt->picture_aspect.denominator);
+ if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
+ pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
+ if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
+ pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
}
EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
+{
+ struct v4l2_fract ratio = { 1, 1 };
+ unsigned long n, d;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return ratio;
+ if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
+ return ratio;
+
+ ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
+ ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
+
+ rational_best_approximation(ratio.numerator, ratio.denominator,
+ ratio.numerator, ratio.denominator, &n, &d);
+ ratio.numerator = n;
+ ratio.denominator = d;
+ return ratio;
+}
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+
/*
* CVT defines
* Based on Coordinated Video Timings Standard
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index ae7544d5469a..794e563f24f8 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -638,7 +638,7 @@ struct v4l2_flash *v4l2_flash_init(
v4l2_flash->iled_cdev = iled_cdev;
v4l2_flash->ops = ops;
sd->dev = dev;
- sd->of_node = of_node;
+ sd->of_node = of_node ? of_node : led_cdev->dev->of_node;
v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
sd->internal_ops = &v4l2_flash_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -654,10 +654,7 @@ struct v4l2_flash *v4l2_flash_init(
if (ret < 0)
goto err_init_controls;
- if (sd->of_node)
- of_node_get(sd->of_node);
- else
- of_node_get(led_cdev->dev->of_node);
+ of_node_get(sd->of_node);
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
@@ -666,7 +663,7 @@ struct v4l2_flash *v4l2_flash_init(
return v4l2_flash;
err_async_register_sd:
- of_node_put(led_cdev->dev->of_node);
+ of_node_put(sd->of_node);
v4l2_ctrl_handler_free(sd->ctrl_handler);
err_init_controls:
media_entity_cleanup(&sd->entity);
@@ -678,20 +675,15 @@ EXPORT_SYMBOL_GPL(v4l2_flash_init);
void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
{
struct v4l2_subdev *sd;
- struct led_classdev *led_cdev;
if (IS_ERR_OR_NULL(v4l2_flash))
return;
sd = &v4l2_flash->sd;
- led_cdev = &v4l2_flash->fled_cdev->led_cdev;
v4l2_async_unregister_subdev(sd);
- if (sd->of_node)
- of_node_put(sd->of_node);
- else
- of_node_put(led_cdev->dev->of_node);
+ of_node_put(sd->of_node);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c52d94c018bb..0c3f238a2e76 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -174,8 +174,7 @@ static void v4l_print_querycap(const void *arg, bool write_only)
{
const struct v4l2_capability *p = arg;
- pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
- "capabilities=0x%08x, device_caps=0x%08x\n",
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
(int)sizeof(p->driver), p->driver,
(int)sizeof(p->card), p->card,
(int)sizeof(p->bus_info), p->bus_info,
@@ -186,8 +185,7 @@ static void v4l_print_enuminput(const void *arg, bool write_only)
{
const struct v4l2_input *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
- "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->tuner, (unsigned long long)p->std, p->status,
p->capabilities);
@@ -197,8 +195,7 @@ static void v4l_print_enumoutput(const void *arg, bool write_only)
{
const struct v4l2_output *p = arg;
- pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
- "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p->modulator, (unsigned long long)p->std, p->capabilities);
}
@@ -256,11 +253,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &p->fmt.pix;
- pr_cont(", width=%u, height=%u, "
- "pixelformat=%c%c%c%c, field=%s, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d, "
- "flags=0x%x, ycbcr_enc=%u, quantization=%u, "
- "xfer_func=%u\n",
+ pr_cont(", width=%u, height=%u, pixelformat=%c%c%c%c, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -274,10 +267,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
- pr_cont(", width=%u, height=%u, "
- "format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%u, flags=0x%x, "
- "ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ pr_cont(", width=%u, height=%u, format=%c%c%c%c, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
mp->width, mp->height,
(mp->pixelformat & 0xff),
(mp->pixelformat >> 8) & 0xff,
@@ -306,8 +296,7 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
vbi = &p->fmt.vbi;
- pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
- "sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
vbi->sampling_rate, vbi->offset,
vbi->samples_per_line,
(vbi->sample_format & 0xff),
@@ -343,9 +332,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
{
const struct v4l2_framebuffer *p = arg;
- pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
- "height=%u, pixelformat=%c%c%c%c, "
- "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%c%c%c%c, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
p->capability, p->flags, p->base,
p->fmt.width, p->fmt.height,
(p->fmt.pixelformat & 0xff),
@@ -368,8 +355,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
else
- pr_cont("index=%u, name=%.*s, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
p->index, (int)sizeof(p->name), p->name, p->capability,
p->rangelow, p->rangehigh, p->txsubchans);
}
@@ -381,9 +367,7 @@ static void v4l_print_tuner(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
else
- pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
- "rxsubchans=0x%x, audmode=%u\n",
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
p->index, (int)sizeof(p->name), p->name, p->type,
p->capability, p->rangelow,
p->rangehigh, p->signal, p->afc,
@@ -402,8 +386,8 @@ static void v4l_print_standard(const void *arg, bool write_only)
{
const struct v4l2_standard *p = arg;
- pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
- "framelines=%u\n", p->index,
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
+ p->index,
(unsigned long long)p->id, (int)sizeof(p->name), p->name,
p->frameperiod.numerator,
p->frameperiod.denominator,
@@ -419,8 +403,7 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
{
const struct v4l2_hw_freq_seek *p = arg;
- pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
- "rangelow=%u, rangehigh=%u\n",
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
p->rangelow, p->rangehigh);
}
@@ -442,8 +425,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
- "flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
@@ -458,8 +440,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
- "plane %d: bytesused=%d, data_offset=0x%08x, "
- "offset/userptr=0x%lx, length=%d\n",
+ "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
@@ -468,8 +449,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
p->bytesused, p->m.userptr, p->length);
}
- printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
- "flags=0x%08x, frames=%d, userbits=0x%08x\n",
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
@@ -503,8 +483,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
const struct v4l2_captureparm *c = &p->parm.capture;
- pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, readbuffers=%d\n",
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
c->capability, c->capturemode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->readbuffers);
@@ -512,8 +491,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
const struct v4l2_outputparm *c = &p->parm.output;
- pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
- "extendedmode=%d, writebuffers=%d\n",
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
@@ -526,8 +504,7 @@ static void v4l_print_queryctrl(const void *arg, bool write_only)
{
const struct v4l2_queryctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
- "step=%d, default=%d, flags=0x%08x\n",
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags);
@@ -537,9 +514,7 @@ static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
{
const struct v4l2_query_ext_ctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, "
- "step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, "
- "nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags,
@@ -583,9 +558,7 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
- pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
- "defrect wxh=%dx%d, x,y=%d,%d, "
- "pixelaspect %d/%d\n",
+ pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
@@ -618,8 +591,7 @@ static void v4l_print_jpegcompression(const void *arg, bool write_only)
{
const struct v4l2_jpegcompression *p = arg;
- pr_cont("quality=%d, APPn=%d, APP_len=%d, "
- "COM_len=%d, jpeg_markers=0x%x\n",
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
p->quality, p->APPn, p->APP_len,
p->COM_len, p->jpeg_markers);
}
@@ -686,14 +658,7 @@ static void v4l_print_dv_timings(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, interlaced=%u, "
- "pixelclock=%llu, "
- "width=%u, height=%u, polarities=0x%x, "
- "hfrontporch=%u, hsync=%u, "
- "hbackporch=%u, vfrontporch=%u, "
- "vsync=%u, vbackporch=%u, "
- "il_vfrontporch=%u, il_vsync=%u, "
- "il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+ pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
p->bt.interlaced, p->bt.pixelclock,
p->bt.width, p->bt.height,
p->bt.polarities, p->bt.hfrontporch,
@@ -723,8 +688,7 @@ static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
switch (p->type) {
case V4L2_DV_BT_656_1120:
- pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
- "pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
p->bt.min_width, p->bt.max_width,
p->bt.min_height, p->bt.max_height,
p->bt.min_pixelclock, p->bt.max_pixelclock,
@@ -805,8 +769,7 @@ static void v4l_print_event(const void *arg, bool write_only)
const struct v4l2_event *p = arg;
const struct v4l2_event_ctrl *c;
- pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
- "timestamp=%lu.%9.9lu\n",
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%lu.%9.9lu\n",
p->type, p->pending, p->sequence, p->id,
p->timestamp.tv_sec, p->timestamp.tv_nsec);
switch (p->type) {
@@ -822,8 +785,7 @@ static void v4l_print_event(const void *arg, bool write_only)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
- pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
- "default_value=%d\n",
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
@@ -859,8 +821,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
{
const struct v4l2_frequency_band *p = arg;
- pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
@@ -1167,6 +1128,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
@@ -1230,7 +1194,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
- case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR (Exp.)"; break;
+ case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
@@ -1239,6 +1206,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
+ case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break;
case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
@@ -1269,6 +1238,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break;
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
+ case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
@@ -1287,6 +1257,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break;
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
+ case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
default:
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index def84753c4c3..1dbf6f7785bb 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -572,8 +572,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
switch (b->memory) {
case V4L2_MEMORY_MMAP:
if (0 == buf->baddr) {
- dprintk(1, "qbuf: mmap requested "
- "but buffer addr is zero!\n");
+ dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
goto done;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 1db0af6c7f94..ba63ca57ed7e 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,
- vma->vm_start, vma->vm_end);
+ vmf->address, vma->vm_start, vma->vm_end);
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
- clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+ clear_user_highpage(page, vmf->address);
vmf->page = page;
return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202ff83..7c1d390ea438 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -358,8 +358,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
if (memory == VB2_MEMORY_MMAP) {
ret = __vb2_buf_mem_alloc(vb);
if (ret) {
- dprintk(1, "failed allocating memory for "
- "buffer %d\n", buffer);
+ dprintk(1, "failed allocating memory for buffer %d\n",
+ buffer);
q->bufs[vb->index] = NULL;
kfree(vb);
break;
@@ -372,8 +372,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
*/
ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
- dprintk(1, "buffer %d %p initialization"
- " failed\n", buffer, vb);
+ dprintk(1, "buffer %d %p initialization failed\n",
+ buffer, vb);
__vb2_buf_mem_free(vb);
q->bufs[vb->index] = NULL;
kfree(vb);
@@ -997,13 +997,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
&& vb->planes[plane].length == planes[plane].length)
continue;
- dprintk(3, "userspace address for plane %d changed, "
- "reacquiring memory\n", plane);
+ dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
+ plane);
/* Check if the provided plane buffer is large enough */
if (planes[plane].length < vb->planes[plane].min_length) {
- dprintk(1, "provided buffer size %u is less than "
- "setup size %u for plane %d\n",
+ dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
planes[plane].length,
vb->planes[plane].min_length,
plane);
@@ -1032,8 +1031,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
planes[plane].m.userptr,
planes[plane].length, dma_dir);
if (IS_ERR(mem_priv)) {
- dprintk(1, "failed acquiring userspace "
- "memory for plane %d\n", plane);
+ dprintk(1, "failed acquiring userspace memory for plane %d\n",
+ plane);
ret = PTR_ERR(mem_priv);
goto err;
}
@@ -1123,8 +1122,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
planes[plane].length = dbuf->size;
if (planes[plane].length < vb->planes[plane].min_length) {
- dprintk(1, "invalid dmabuf length %u for plane %d, "
- "minimum length %u\n",
+ dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
planes[plane].length, plane,
vb->planes[plane].min_length);
dma_buf_put(dbuf);
@@ -1472,8 +1470,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
}
if (nonblocking) {
- dprintk(1, "nonblocking and no buffers to dequeue, "
- "will not wait\n");
+ dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n");
return -EAGAIN;
}
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 52ef8833f6b6..3529849d2218 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -60,14 +60,13 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
/* Is memory for copying plane information present? */
if (b->m.planes == NULL) {
- dprintk(1, "multi-planar buffer passed but "
- "planes array not provided\n");
+ dprintk(1, "multi-planar buffer passed but planes array not provided\n");
return -EINVAL;
}
if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
- dprintk(1, "incorrect planes array length, "
- "expected %d, got %d\n", vb->num_planes, b->length);
+ dprintk(1, "incorrect planes array length, expected %d, got %d\n",
+ vb->num_planes, b->length);
return -EINVAL;
}
@@ -316,8 +315,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
* that just says that it is either a top or a bottom field,
* but not which of the two it is.
*/
- dprintk(1, "the field is incorrectly set to ALTERNATE "
- "for an output buffer\n");
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
return -EINVAL;
}
vb->timestamp = 0;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ab3227b75c84..3f778147cdef 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -151,8 +151,7 @@ static void *vb2_vmalloc_vaddr(void *buf_priv)
struct vb2_vmalloc_buf *buf = buf_priv;
if (!buf->vaddr) {
- pr_err("Address of an unallocated plane requested "
- "or cannot map user pointer\n");
+ pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
return NULL;
}
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 4b4c0c3c3d2f..ec80e35c8dfe 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -134,6 +134,14 @@ config MTK_SMI
mainly help enable/disable iommu and control the power domain and
clocks for each local arbiter.
+config DA8XX_DDRCTL
+ bool "Texas Instruments da8xx DDR2/mDDR driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ This driver is for the DDR2/mDDR Memory Controller present on
+ Texas Instruments da8xx SoCs. It's used to tweak various memory
+ controller configuration options.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b20ae38b5bfb..e88097fbc085 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index b5ed3bd082b5..047d6fcdcec2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -657,7 +657,7 @@ static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
return -ENOMEM;
newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
- if (!newprop->name)
+ if (!newprop->value)
return -ENOMEM;
newprop->length = sizeof("disabled");
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 12080b05e3e6..b418b39af180 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -85,8 +85,4 @@ static struct platform_driver atmel_ramc_driver = {
},
};
-static int __init atmel_ramc_init(void)
-{
- return platform_driver_register(&atmel_ramc_driver);
-}
-device_initcall(atmel_ramc_init);
+builtin_platform_driver(atmel_ramc_driver);
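The atmel-sdramc conversion above replaces an open-coded initcall with the builtin_platform_driver() helper. A minimal sketch of the same pattern under hypothetical names (example_probe, example_driver); the helper registers the driver from an initcall and deliberately provides no exit path, which suits drivers that are always built in:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
	},
};

/* Registers example_driver at device_initcall time; no module exit path. */
builtin_platform_driver(example_driver);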
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 000000000000..030afbe29d0c
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,173 @@
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as this is
+ * hardware configuration rather than hardware description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+struct da8xx_ddrctl_config_knob {
+ const char *name;
+ u32 reg;
+ u32 mask;
+ u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+ {
+ .name = "da850-pbbpr",
+ .reg = 0x20,
+ .mask = 0xffffff00,
+ .shift = 0,
+ },
+};
+
+struct da8xx_ddrctl_setting {
+ const char *name;
+ u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+ const char *board;
+ const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+ {
+ .name = "da850-pbbpr",
+ .val = 0x20,
+ },
+ { }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .settings = da850_lcdk_ddrctl_settings,
+ },
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+ knob = &da8xx_ddrctl_knobs[i];
+
+ if (strcmp(knob->name, setting->name) == 0)
+ return knob;
+ }
+
+ return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+ const struct da8xx_ddrctl_board_settings *board_settings;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+ board_settings = &da8xx_ddrctl_board_confs[i];
+
+ if (of_machine_is_compatible(board_settings->board))
+ return board_settings->settings;
+ }
+
+ return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ const struct da8xx_ddrctl_setting *setting;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *ddrctl;
+ struct device *dev;
+ u32 reg;
+
+ dev = &pdev->dev;
+ node = dev->of_node;
+
+ setting = da8xx_ddrctl_get_board_settings();
+ if (!setting) {
+ dev_err(dev, "no settings defined for this board\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ddrctl)) {
+ dev_err(dev, "unable to map memory controller registers\n");
+ return PTR_ERR(ddrctl);
+ }
+
+ for (; setting->name; setting++) {
+ knob = da8xx_ddrctl_match_knob(setting);
+ if (!knob) {
+ dev_warn(dev,
+ "no such config option: %s\n", setting->name);
+ continue;
+ }
+
+ if (knob->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev,
+ "register offset of '%s' exceeds mapped memory size\n",
+ knob->name);
+ continue;
+ }
+
+ reg = readl(ddrctl + knob->reg);
+ reg &= knob->mask;
+ reg |= setting->val << knob->shift;
+
+ dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+ writel(reg, ddrctl + knob->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+ { .compatible = "ti,da850-ddr-controller", },
+ { },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+ .probe = da8xx_ddrctl_probe,
+ .driver = {
+ .name = "da850-ddr-controller",
+ .of_match_table = da8xx_ddrctl_of_match,
+ },
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 02b5f69e1a42..7b3b41368931 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -58,7 +58,7 @@
#include <linux/compat.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 69e9d5463564..8946e19dbfc8 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -70,7 +70,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/* Override mptbase.h by pre-defining these! */
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 25e1aafae60c..227b99018657 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1132,8 +1132,7 @@ static int pm860x_dt_init(struct device_node *np,
return 0;
}
-static int pm860x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pm860x_probe(struct i2c_client *client)
{
struct pm860x_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
@@ -1259,7 +1258,7 @@ static struct i2c_driver pm860x_driver = {
.pm = &pm860x_pm_ops,
.of_match_table = pm860x_dt_ids,
},
- .probe = pm860x_probe,
+ .probe_new = pm860x_probe,
.remove = pm860x_remove,
.id_table = pm860x_id_table,
};
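The 88pm860x hunk above is an instance of the i2c .probe_new conversion: the probe callback drops the legacy i2c_device_id argument and the driver registers it through .probe_new instead of .probe. A minimal sketch of the pattern, using hypothetical names (example_probe, example_driver):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

/* With .probe_new the callback takes only the client; per-variant data
 * comes from the matched of_device_id rather than an i2c_device_id entry. */
static int example_probe(struct i2c_client *client)
{
	dev_info(&client->dev, "probing %s\n", client->name);
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
		.of_match_table = example_of_match,
	},
	.probe_new = example_probe,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL v2");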
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1ed0584f494e..4ce3b6f11830 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -40,6 +40,22 @@ config MFD_ACT8945A
linear regulators, along with a complete ActivePath battery
charger.
+config MFD_SUN4I_GPADC
+ tristate "Allwinner sunxi platforms' GPADC MFD driver"
+ select MFD_CORE
+ select REGMAP_MMIO
+ select REGMAP_IRQ
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ Select this to get support for the ADC on Allwinner SoCs (A10, A13
+ and A31). This driver only maps the hardware interrupt and registers;
+ you have to select individual drivers based on this MFD to be able to
+ use the ADC or the thermal sensor. It will try to probe the ADC driver
+ sun4i-gpadc-iio and the hwmon driver iio_hwmon.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sun4i-gpadc.
+
config MFD_AS3711
bool "AMS AS3711"
select MFD_CORE
@@ -293,6 +309,7 @@ config MFD_DLN2
config MFD_EXYNOS_LPASS
tristate "Samsung Exynos SoC Low Power Audio Subsystem"
+ depends on ARCH_EXYNOS || COMPILE_TEST
select MFD_CORE
select REGMAP_MMIO
help
@@ -563,7 +580,7 @@ config MFD_MAX14577
config MFD_MAX77620
bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
depends on I2C=y
- depends on OF
+ depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -578,7 +595,7 @@ config MFD_MAX77620
config MFD_MAX77686
tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C
- depends on OF
+ depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -877,7 +894,8 @@ config MFD_RN5T618
select MFD_CORE
select REGMAP_I2C
help
- Say yes here to add support for the Ricoh RN5T567 or R5T618 PMIC.
+ Say yes here to add support for the Ricoh RN5T567,
+ RN5T618 or RC5T619 PMIC.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -951,7 +969,7 @@ config MFD_SMSC
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
- default y if ARCH_U300 || ARCH_U8500
+ default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
help
Say yes here if you have the ABX500 Mixed Signal IC family
chips. This core driver expose register access functions.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 7bb5a50127cb..dda4d4f73ad7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -211,3 +211,4 @@ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
+obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 6a5a98806cb8..099635bed188 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -12,7 +12,7 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -628,20 +628,10 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
exit_no_debugfs:
return;
}
-static inline void ab3100_remove_debugfs(void)
-{
- debugfs_remove(ab3100_set_reg_file);
- debugfs_remove(ab3100_get_reg_file);
- debugfs_remove(ab3100_reg_file);
- debugfs_remove(ab3100_dir);
-}
#else
static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
{
}
-static inline void ab3100_remove_debugfs(void)
-{
-}
#endif
/*
@@ -949,45 +939,22 @@ static int ab3100_probe(struct i2c_client *client,
return err;
}
-static int ab3100_remove(struct i2c_client *client)
-{
- struct ab3100 *ab3100 = i2c_get_clientdata(client);
-
- /* Unregister subdevices */
- mfd_remove_devices(&client->dev);
- ab3100_remove_debugfs();
- i2c_unregister_device(ab3100->testreg_client);
- return 0;
-}
-
static const struct i2c_device_id ab3100_id[] = {
{ "ab3100", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, ab3100_id);
static struct i2c_driver ab3100_driver = {
.driver = {
- .name = "ab3100",
+ .name = "ab3100",
+ .suppress_bind_attrs = true,
},
.id_table = ab3100_id,
.probe = ab3100_probe,
- .remove = ab3100_remove,
};
static int __init ab3100_i2c_init(void)
{
return i2c_add_driver(&ab3100_driver);
}
-
-static void __exit ab3100_i2c_exit(void)
-{
- i2c_del_driver(&ab3100_driver);
-}
-
subsys_initcall(ab3100_i2c_init);
-module_exit(ab3100_i2c_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("AB3100 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 589eebfc13df..6e00124cef01 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -14,7 +14,7 @@
#include <linux/irqdomain.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
@@ -123,6 +123,10 @@ static DEFINE_SPINLOCK(on_stat_lock);
static u8 turn_on_stat_mask = 0xFF;
static u8 turn_on_stat_set;
static bool no_bm; /* No battery management */
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(no_bm, bool, S_IRUGO);
#define AB9540_MODEM_CTRL2_REG 0x23
@@ -1324,25 +1328,6 @@ static int ab8500_probe(struct platform_device *pdev)
return ret;
}
-static int ab8500_remove(struct platform_device *pdev)
-{
- struct ab8500 *ab8500 = platform_get_drvdata(pdev);
-
- if (((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
- ab8500->chip_id >= AB8500_CUT2P0) || is_ab8540(ab8500))
- sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
- else
- sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
-
- if ((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
- ab8500->chip_id >= AB8500_CUT2P0)
- sysfs_remove_group(&ab8500->dev->kobj, &ab8505_attr_group);
-
- mfd_remove_devices(ab8500->dev);
-
- return 0;
-}
-
static const struct platform_device_id ab8500_id[] = {
{ "ab8500-core", AB8500_VERSION_AB8500 },
{ "ab8505-i2c", AB8500_VERSION_AB8505 },
@@ -1354,9 +1339,9 @@ static const struct platform_device_id ab8500_id[] = {
static struct platform_driver ab8500_core_driver = {
.driver = {
.name = "ab8500-core",
+ .suppress_bind_attrs = true,
},
.probe = ab8500_probe,
- .remove = ab8500_remove,
.id_table = ab8500_id,
};
@@ -1364,14 +1349,4 @@ static int __init ab8500_core_init(void)
{
return platform_driver_register(&ab8500_core_driver);
}
-
-static void __exit ab8500_core_exit(void)
-{
- platform_driver_unregister(&ab8500_core_driver);
-}
core_initcall(ab8500_core_init);
-module_exit(ab8500_core_exit);
-
-MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
-MODULE_DESCRIPTION("AB8500 MFD core");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index acf6c00b14b9..c1c815241e02 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -74,7 +74,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -3234,33 +3234,16 @@ err:
return -ENOMEM;
}
-static int ab8500_debug_remove(struct platform_device *plf)
-{
- debugfs_remove_recursive(ab8500_dir);
-
- return 0;
-}
-
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
+ .suppress_bind_attrs = true,
},
.probe = ab8500_debug_probe,
- .remove = ab8500_debug_remove
};
static int __init ab8500_debug_init(void)
{
return platform_driver_register(&ab8500_debug_driver);
}
-
-static void __exit ab8500_debug_exit(void)
-{
- platform_driver_unregister(&ab8500_debug_driver);
-}
subsys_initcall(ab8500_debug_init);
-module_exit(ab8500_debug_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 DEBUG");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 97dcadc8fa8b..f4e94869d612 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -5,9 +5,9 @@
* Author: Arun R Murthy <arun.murthy@stericsson.com>
* Author: Daniel Willerud <daniel.willerud@stericsson.com>
* Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
@@ -1054,11 +1054,7 @@ static int __init ab8500_gpadc_init(void)
{
return platform_driver_register(&ab8500_gpadc_driver);
}
-
-static void __exit ab8500_gpadc_exit(void)
-{
- platform_driver_unregister(&ab8500_gpadc_driver);
-}
+subsys_initcall_sync(ab8500_gpadc_init);
/**
* ab8540_gpadc_get_otp() - returns OTP values
@@ -1077,14 +1073,3 @@ void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
*ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
*ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
}
-
-subsys_initcall_sync(ab8500_gpadc_init);
-module_exit(ab8500_gpadc_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Arun R Murthy");
-MODULE_AUTHOR("Daniel Willerud");
-MODULE_AUTHOR("Johan Palsson");
-MODULE_AUTHOR("M'boumba Cedric Madianga");
-MODULE_ALIAS("platform:ab8500_gpadc");
-MODULE_DESCRIPTION("AB8500 GPADC driver");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 207cc497958a..80c0efa66ac1 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -1,11 +1,14 @@
/*
+ * AB8500 system control driver
+ *
* Copyright (C) ST-Ericsson SA 2010
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reboot.h>
@@ -158,7 +161,3 @@ static int __init ab8500_sysctrl_init(void)
return platform_driver_register(&ab8500_sysctrl_driver);
}
arch_initcall(ab8500_sysctrl_init);
-
-MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
-MODULE_DESCRIPTION("AB8500 system control driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index fe418995108c..0d3846a4767c 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -8,7 +8,8 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/mfd/abx500.h>
static LIST_HEAD(abx500_list);
@@ -150,7 +151,3 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
return -ENOTSUPP;
}
EXPORT_SYMBOL(abx500_startup_irq_enabled);
-
-MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
-MODULE_DESCRIPTION("ABX500 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 41767f7239bb..b6d4bc63c426 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1553,6 +1553,7 @@ EXPORT_SYMBOL_GPL(arizona_dev_init);
int arizona_dev_exit(struct arizona *arizona)
{
+ disable_irq(arizona->irq);
pm_runtime_disable(arizona->dev);
regulator_disable(arizona->dcvdd);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 5e18d3c77582..2e01975f042d 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -398,10 +398,10 @@ err_ctrlif:
err_boot_done:
free_irq(arizona->irq, arizona);
err_main_irq:
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
arizona->irq_chip);
err_aod:
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
arizona->aod_irq_chip);
err:
return ret;
@@ -413,9 +413,9 @@ int arizona_irq_exit(struct arizona *arizona)
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
arizona);
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
arizona->irq_chip);
- regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
arizona->aod_irq_chip);
free_irq(arizona->irq, arizona);
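The irq_create_mapping() to irq_find_mapping() change in these unwind paths matters because irq_create_mapping() allocates a new virq when no mapping exists, which teardown code should never do; irq_find_mapping() only looks up an existing mapping and returns 0 otherwise. A minimal sketch of the lookup-only idiom, with hypothetical names (example_del_irq_chip, domain, hwirq):

#include <linux/irqdomain.h>
#include <linux/regmap.h>

static void example_del_irq_chip(struct irq_domain *domain,
				 irq_hw_number_t hwirq,
				 struct regmap_irq_chip_data *data)
{
	/* Look up the existing mapping only; never allocate one on teardown. */
	unsigned int virq = irq_find_mapping(domain, hwirq);

	if (virq)
		regmap_del_irq_chip(virq, data);
}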
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index b1b865822c07..d35a5fe6c950 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -69,10 +69,11 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
};
MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
-/*
- * This is useless for OF-enabled devices, but it is needed by I2C subsystem
- */
static const struct i2c_device_id axp20x_i2c_id[] = {
+ { "axp152", 0 },
+ { "axp202", 0 },
+ { "axp209", 0 },
+ { "axp221", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index ba130be32e61..ed918de84238 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -98,6 +98,7 @@ static const struct regmap_range axp22x_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP22X_PMIC_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES),
};
@@ -135,6 +136,7 @@ static const struct regmap_range axp806_writeable_ranges[] = {
regmap_reg_range(AXP806_PWR_OUT_CTRL1, AXP806_CLDO3_V_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ2_EN),
regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
+ regmap_reg_range(AXP806_REG_ADDR_EXT, AXP806_REG_ADDR_EXT),
};
static const struct regmap_range axp806_volatile_ranges[] = {
@@ -305,7 +307,7 @@ static const struct regmap_config axp806_regmap_config = {
.val_bits = 8,
.wr_table = &axp806_writeable_table,
.volatile_table = &axp806_volatile_table,
- .max_register = AXP806_VREF_TEMP_WARN_L,
+ .max_register = AXP806_REG_ADDR_EXT,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 0d76d690176b..c572a35a9341 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -67,7 +67,7 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri,
/* Secondary I2C slave address is the base address with A(2) asserted */
bcm590xx->i2c_sec = i2c_new_dummy(i2c_pri->adapter,
i2c_pri->addr | BIT(2));
- if (IS_ERR_OR_NULL(bcm590xx->i2c_sec)) {
+ if (!bcm590xx->i2c_sec) {
dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n");
return -ENODEV;
}
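The bcm590xx check is corrected because i2c_new_dummy() reports failure by returning NULL rather than an ERR_PTR value, so IS_ERR_OR_NULL() suggested an error-pointer case that cannot happen. A minimal sketch of the intended check, with a hypothetical secondary address:

#include <linux/i2c.h>

static struct i2c_client *example_add_secondary(struct i2c_client *primary)
{
	/* i2c_new_dummy() returns NULL on failure, never an ERR_PTR. */
	struct i2c_client *secondary =
		i2c_new_dummy(primary->adapter, primary->addr | 0x04);

	if (!secondary)
		dev_err(&primary->dev, "failed to add secondary I2C device\n");

	return secondary;
}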
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index f6b78aafdb55..c090974340ad 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -292,6 +292,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
{ 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
{ 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
{ 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
{ 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
{ 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
@@ -318,6 +319,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
{ 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
{ 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
{ 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
{ 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
{ 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
@@ -340,6 +342,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
{ 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
{ 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
{ 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
{ 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
{ 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
@@ -923,6 +926,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF1_RX_PIN_CTRL:
case ARIZONA_AIF1_RATE_CTRL:
case ARIZONA_AIF1_FORMAT:
+ case ARIZONA_AIF1_TX_BCLK_RATE:
case ARIZONA_AIF1_RX_BCLK_RATE:
case ARIZONA_AIF1_FRAME_CTRL_1:
case ARIZONA_AIF1_FRAME_CTRL_2:
@@ -949,6 +953,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF2_RX_PIN_CTRL:
case ARIZONA_AIF2_RATE_CTRL:
case ARIZONA_AIF2_FORMAT:
+ case ARIZONA_AIF2_TX_BCLK_RATE:
case ARIZONA_AIF2_RX_BCLK_RATE:
case ARIZONA_AIF2_FRAME_CTRL_1:
case ARIZONA_AIF2_FRAME_CTRL_2:
@@ -971,6 +976,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_AIF3_RX_PIN_CTRL:
case ARIZONA_AIF3_RATE_CTRL:
case ARIZONA_AIF3_FORMAT:
+ case ARIZONA_AIF3_TX_BCLK_RATE:
case ARIZONA_AIF3_RX_BCLK_RATE:
case ARIZONA_AIF3_FRAME_CTRL_1:
case ARIZONA_AIF3_FRAME_CTRL_2:
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index dff2f19296b8..4d0a5f38038a 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -32,6 +32,7 @@
#include <sound/pcm.h>
#include <linux/mfd/davinci_voicecodec.h>
+#include <mach/hardware.h>
static const struct regmap_config davinci_vc_regmap = {
.reg_bits = 32,
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 77b2675cf8f5..ac430a396a89 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -187,6 +187,7 @@ static const struct of_device_id mx25_tsadc_ids[] = {
{ .compatible = "fsl,imx25-tsadc" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mx25_tsadc_ids);
static struct platform_driver mx25_tsadc_driver = {
.driver = {
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 0fc62995695b..ba706adee38b 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -169,6 +169,7 @@ static const struct of_device_id hi655x_pmic_match[] = {
{ .compatible = "hisilicon,hi655x-pmic", },
{},
};
+MODULE_DEVICE_TABLE(of, hi655x_pmic_match);
static struct platform_driver hi655x_pmic_driver = {
.driver = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9ff243970e93..78dbcf8b0bef 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -41,6 +41,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
/* Probably it is enough to set this for iDMA capable devices only */
pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
ret = intel_lpss_probe(&pdev->dev, info);
if (ret)
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index f9a8c5203873..699c8c7c9052 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -42,6 +42,7 @@
#define BXTWC_GPIOIRQ0 0x4E0B
#define BXTWC_GPIOIRQ1 0x4E0C
#define BXTWC_CRITIRQ 0x4E0D
+#define BXTWC_TMUIRQ 0x4FB6
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
@@ -59,6 +60,7 @@
#define BXTWC_MGPIO0IRQ 0x4E19
#define BXTWC_MGPIO1IRQ 0x4E1A
#define BXTWC_MCRITIRQ 0x4E1B
+#define BXTWC_MTMUIRQ 0x4FB7
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
@@ -92,6 +94,7 @@ enum bxtwc_irqs_level2 {
BXTWC_GPIO0_IRQ,
BXTWC_GPIO1_IRQ,
BXTWC_CRIT_IRQ,
+ BXTWC_TMU_IRQ,
};
static const struct regmap_irq bxtwc_regmap_irqs[] = {
@@ -120,6 +123,10 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 9, 0x03),
};
+static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+};
+
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.name = "bxtwc_irq_chip",
.status_base = BXTWC_IRQLVL1,
@@ -138,6 +145,15 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_level2 = {
.num_regs = 10,
};
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+ .name = "bxtwc_irq_chip_tmu",
+ .status_base = BXTWC_TMUIRQ,
+ .mask_base = BXTWC_MTMUIRQ,
+ .irqs = bxtwc_regmap_irqs_tmu,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_tmu),
+ .num_regs = 1,
+};
+
static struct resource gpio_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_GPIO0_IRQ, "GPIO0"),
DEFINE_RES_IRQ_NAMED(BXTWC_GPIO1_IRQ, "GPIO1"),
@@ -166,6 +182,10 @@ static struct resource bcu_resources[] = {
DEFINE_RES_IRQ_NAMED(BXTWC_BCU_IRQ, "BCU"),
};
+static struct resource tmu_resources[] = {
+ DEFINE_RES_IRQ_NAMED(BXTWC_TMU_IRQ, "TMU"),
+};
+
static struct mfd_cell bxt_wc_dev[] = {
{
.name = "bxt_wcove_gpadc",
@@ -193,6 +213,12 @@ static struct mfd_cell bxt_wc_dev[] = {
.resources = bcu_resources,
},
{
+ .name = "bxt_wcove_tmu",
+ .num_resources = ARRAY_SIZE(tmu_resources),
+ .resources = tmu_resources,
+ },
+
+ {
.name = "bxt_wcove_gpio",
.num_resources = ARRAY_SIZE(gpio_resources),
.resources = gpio_resources,
@@ -402,6 +428,15 @@ static int bxtwc_probe(struct platform_device *pdev)
goto err_irq_chip_level2;
}
+ ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ 0, &bxtwc_regmap_irq_chip_tmu,
+ &pmic->irq_chip_data_tmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
+ goto err_irq_chip_tmu;
+ }
+
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
ARRAY_SIZE(bxt_wc_dev), NULL, 0,
NULL);
@@ -431,6 +466,8 @@ static int bxtwc_probe(struct platform_device *pdev)
err_sysfs:
mfd_remove_devices(&pdev->dev);
err_mfd:
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
+err_irq_chip_tmu:
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
err_irq_chip_level2:
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
@@ -446,6 +483,7 @@ static int bxtwc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
return 0;
}
@@ -481,7 +519,7 @@ static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
{ }
};
-MODULE_DEVICE_TABLE(acpi, pmic_acpi_ids);
+MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index c8dee47b45d9..1ef7575547e6 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -493,6 +493,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_LPT] = {
.name = "Lynx Point",
.iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
},
[LPC_LPT_LP] = {
.name = "Lynx Point_LP",
@@ -530,6 +531,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_9S] = {
.name = "9 Series",
.iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
},
};
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 258757e216c4..b1700b5fa640 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -461,7 +461,7 @@ static int max77620_probe(struct i2c_client *client,
chip->rmap = devm_regmap_init_i2c(client, rmap_config);
if (IS_ERR(chip->rmap)) {
ret = PTR_ERR(chip->rmap);
- dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+ dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
return ret;
}
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 8f8bacb67a15..ee9e9ea10444 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -431,9 +431,6 @@ static void palmas_power_off(void)
unsigned int addr;
int ret, slave;
- if (!palmas_dev)
- return;
-
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 7f9620ec61e8..f08758f6b418 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -39,6 +39,20 @@
#define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7)
#define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8)
+#define PM8821_SSBI_REG_ADDR_IRQ_BASE 0x100
+#define PM8821_SSBI_REG_ADDR_IRQ_MASTER0 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0x30)
+#define PM8821_SSBI_REG_ADDR_IRQ_MASTER1 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0xb0)
+#define PM8821_SSBI_REG(m, b, offset) \
+ ((m == 0) ? \
+ (PM8821_SSBI_REG_ADDR_IRQ_MASTER0 + b + offset) : \
+ (PM8821_SSBI_REG_ADDR_IRQ_MASTER1 + b + offset))
+#define PM8821_SSBI_ADDR_IRQ_ROOT(m, b) PM8821_SSBI_REG(m, b, 0x0)
+#define PM8821_SSBI_ADDR_IRQ_CLEAR(m, b) PM8821_SSBI_REG(m, b, 0x01)
+#define PM8821_SSBI_ADDR_IRQ_MASK(m, b) PM8821_SSBI_REG(m, b, 0x08)
+#define PM8821_SSBI_ADDR_IRQ_RT_STATUS(m, b) PM8821_SSBI_REG(m, b, 0x0f)
+
+#define PM8821_BLOCKS_PER_MASTER 7
+
#define PM_IRQF_LVL_SEL 0x01 /* level select */
#define PM_IRQF_MASK_FE 0x02 /* mask falling edge */
#define PM_IRQF_MASK_RE 0x04 /* mask rising edge */
@@ -54,6 +68,7 @@
#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
#define PM8XXX_NR_IRQS 256
+#define PM8821_NR_IRQS 112
struct pm_irq_chip {
struct regmap *regmap;
@@ -65,6 +80,12 @@ struct pm_irq_chip {
u8 config[0];
};
+struct pm_irq_data {
+ int num_irqs;
+ const struct irq_domain_ops *irq_domain_ops;
+ void (*irq_handler)(struct irq_desc *desc);
+};
+
static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
unsigned int *ip)
{
@@ -182,6 +203,78 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
chained_irq_exit(irq_chip, desc);
}
+static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
+ int master, int block)
+{
+ int pmirq, irq, i, ret;
+ unsigned int bits;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_ROOT(master, block), &bits);
+ if (ret) {
+ pr_err("Reading block %d failed ret=%d", block, ret);
+ return;
+ }
+
+ /* Convert block offset to global block number */
+ block += (master * PM8821_BLOCKS_PER_MASTER) - 1;
+
+ /* Check IRQ bits */
+ for (i = 0; i < 8; i++) {
+ if (bits & BIT(i)) {
+ pmirq = block * 8 + i;
+ irq = irq_find_mapping(chip->irqdomain, pmirq);
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static inline void pm8821_irq_master_handler(struct pm_irq_chip *chip,
+ int master, u8 master_val)
+{
+ int block;
+
+ for (block = 1; block < 8; block++)
+ if (master_val & BIT(block))
+ pm8821_irq_block_handler(chip, master, block);
+}
+
+static void pm8821_irq_handler(struct irq_desc *desc)
+{
+ struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+ unsigned int master;
+ int ret;
+
+ chained_irq_enter(irq_chip, desc);
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER0, &master);
+ if (ret) {
+ pr_err("Failed to read master 0 ret=%d\n", ret);
+ goto done;
+ }
+
+ /* bits 1 through 7 mark the first 7 blocks in master 0 */
+ if (master & GENMASK(7, 1))
+ pm8821_irq_master_handler(chip, 0, master);
+
+ /* bit 0 marks if master 1 contains any bits */
+ if (!(master & BIT(0)))
+ goto done;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER1, &master);
+ if (ret) {
+ pr_err("Failed to read master 1 ret=%d\n", ret);
+ goto done;
+ }
+
+ pm8821_irq_master_handler(chip, 1, master);
+
+done:
+ chained_irq_exit(irq_chip, desc);
+}
+
static void pm8xxx_irq_mask_ack(struct irq_data *d)
{
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
@@ -299,6 +392,104 @@ static const struct irq_domain_ops pm8xxx_irq_domain_ops = {
.map = pm8xxx_irq_domain_map,
};
+static void pm8821_irq_mask_ack(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ u8 block, master;
+ int irq_bit, rc;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+ BIT(irq_bit), BIT(irq_bit));
+ if (rc) {
+ pr_err("Failed to mask IRQ:%d rc=%d\n", pmirq, rc);
+ return;
+ }
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_CLEAR(master, block),
+ BIT(irq_bit), BIT(irq_bit));
+ if (rc)
+ pr_err("Failed to CLEAR IRQ:%d rc=%d\n", pmirq, rc);
+}
+
+static void pm8821_irq_unmask(struct irq_data *d)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int pmirq = irqd_to_hwirq(d);
+ int irq_bit, rc;
+ u8 block, master;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_update_bits(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+ BIT(irq_bit), ~BIT(irq_bit));
+ if (rc)
+ pr_err("Failed to read/write unmask IRQ:%d rc=%d\n", pmirq, rc);
+
+}
+
+static int pm8821_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+ int rc, pmirq = irqd_to_hwirq(d);
+ u8 block, irq_bit, master;
+ unsigned int bits;
+
+ block = pmirq / 8;
+ master = block / PM8821_BLOCKS_PER_MASTER;
+ irq_bit = pmirq % 8;
+ block %= PM8821_BLOCKS_PER_MASTER;
+
+ rc = regmap_read(chip->regmap,
+ PM8821_SSBI_ADDR_IRQ_RT_STATUS(master, block), &bits);
+ if (rc) {
+ pr_err("Reading Status of IRQ %d failed rc=%d\n", pmirq, rc);
+ return rc;
+ }
+
+ *state = !!(bits & BIT(irq_bit));
+
+ return rc;
+}
+
+static struct irq_chip pm8821_irq_chip = {
+ .name = "pm8821",
+ .irq_mask_ack = pm8821_irq_mask_ack,
+ .irq_unmask = pm8821_irq_unmask,
+ .irq_get_irqchip_state = pm8821_irq_get_irqchip_state,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int pm8821_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct pm_irq_chip *chip = d->host_data;
+
+ irq_set_chip_and_handler(irq, &pm8821_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, chip);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops pm8821_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = pm8821_irq_domain_map,
+};
+
static const struct regmap_config ssbi_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -308,22 +499,41 @@ static const struct regmap_config ssbi_regmap_config = {
.reg_write = ssbi_reg_write
};
+static const struct pm_irq_data pm8xxx_data = {
+ .num_irqs = PM8XXX_NR_IRQS,
+ .irq_domain_ops = &pm8xxx_irq_domain_ops,
+ .irq_handler = pm8xxx_irq_handler,
+};
+
+static const struct pm_irq_data pm8821_data = {
+ .num_irqs = PM8821_NR_IRQS,
+ .irq_domain_ops = &pm8821_irq_domain_ops,
+ .irq_handler = pm8821_irq_handler,
+};
+
static const struct of_device_id pm8xxx_id_table[] = {
- { .compatible = "qcom,pm8018", },
- { .compatible = "qcom,pm8058", },
- { .compatible = "qcom,pm8921", },
+ { .compatible = "qcom,pm8018", .data = &pm8xxx_data},
+ { .compatible = "qcom,pm8058", .data = &pm8xxx_data},
+ { .compatible = "qcom,pm8821", .data = &pm8821_data},
+ { .compatible = "qcom,pm8921", .data = &pm8xxx_data},
{ }
};
MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
static int pm8xxx_probe(struct platform_device *pdev)
{
+ const struct pm_irq_data *data;
struct regmap *regmap;
int irq, rc;
unsigned int val;
u32 rev;
struct pm_irq_chip *chip;
- unsigned int nirqs = PM8XXX_NR_IRQS;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -354,25 +564,26 @@ static int pm8xxx_probe(struct platform_device *pdev)
rev |= val << BITS_PER_BYTE;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip) +
- sizeof(chip->config[0]) * nirqs,
- GFP_KERNEL);
+ sizeof(chip->config[0]) * data->num_irqs,
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
chip->regmap = regmap;
- chip->num_irqs = nirqs;
+ chip->num_irqs = data->num_irqs;
chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
spin_lock_init(&chip->pm_irq_lock);
- chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, nirqs,
- &pm8xxx_irq_domain_ops,
+ chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+ data->num_irqs,
+ data->irq_domain_ops,
chip);
if (!chip->irqdomain)
return -ENODEV;
- irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
+ irq_set_chained_handler_and_data(irq, data->irq_handler, chip);
irq_set_irq_wake(irq, 1);
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
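The pm8xxx rework above follows the usual pattern for per-variant parameters: each compatible string carries a pointer to driver-private data in the of_device_id .data field, and probe retrieves it with of_device_get_match_data(). A minimal sketch under hypothetical names (example_data, example_probe):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_data {
	int num_irqs;
};

static const struct example_data example_v1_data = { .num_irqs = 256 };
static const struct example_data example_v2_data = { .num_irqs = 112 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1_data },
	{ .compatible = "vendor,example-v2", .data = &example_v2_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matched compatible, or NULL. */
	const struct example_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;

	dev_info(&pdev->dev, "variant with %d IRQs\n", data->num_irqs);
	return 0;
}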
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 0f8acc5882a4..2c9acdba7c2d 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -290,6 +290,24 @@ static void rk808_device_shutdown(void)
dev_err(&rk808_i2c_client->dev, "power off error!\n");
}
+static void rk818_device_shutdown(void)
+{
+ int ret;
+ struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+
+ if (!rk808) {
+ dev_warn(&rk808_i2c_client->dev,
+ "have no rk818, so do nothing here\n");
+ return;
+ }
+
+ ret = regmap_update_bits(rk808->regmap,
+ RK818_DEVCTRL_REG,
+ DEV_OFF, DEV_OFF);
+ if (ret)
+ dev_err(&rk808_i2c_client->dev, "power off error!\n");
+}
+
static const struct of_device_id rk808_of_match[] = {
{ .compatible = "rockchip,rk808" },
{ .compatible = "rockchip,rk818" },
@@ -304,6 +322,7 @@ static int rk808_probe(struct i2c_client *client,
struct rk808 *rk808;
const struct rk808_reg_data *pre_init_reg;
const struct mfd_cell *cells;
+ void (*pm_pwroff_fn)(void);
int nr_pre_init_regs;
int nr_cells;
int pm_off = 0;
@@ -331,6 +350,7 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
cells = rk808s;
nr_cells = ARRAY_SIZE(rk808s);
+ pm_pwroff_fn = rk808_device_shutdown;
break;
case RK818_ID:
rk808->regmap_cfg = &rk818_regmap_config;
@@ -339,6 +359,7 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
cells = rk818s;
nr_cells = ARRAY_SIZE(rk818s);
+ pm_pwroff_fn = rk818_device_shutdown;
break;
default:
dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
@@ -393,7 +414,7 @@ static int rk808_probe(struct i2c_client *client,
"rockchip,system-power-controller");
if (pm_off && !pm_power_off) {
rk808_i2c_client = client;
- pm_power_off = rk808_device_shutdown;
+ pm_power_off = pm_pwroff_fn;
}
return 0;
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index ee94080e1cbb..8131d1975745 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -87,6 +87,7 @@ static int rn5t618_restart(struct notifier_block *this,
static const struct of_device_id rn5t618_of_match[] = {
{ .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 },
{ .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 },
+ { .compatible = "ricoh,rc5t619", .data = (void *)RC5T619 },
{ }
};
MODULE_DEVICE_TABLE(of, rn5t618_of_match);
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index c180b7533bba..e6a3d999a376 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -753,7 +753,7 @@ static int si476x_core_probe(struct i2c_client *client,
ARRAY_SIZE(core->supplies),
core->supplies);
if (rval) {
- dev_err(&client->dev, "Failet to gett all of the regulators\n");
+ dev_err(&client->dev, "Failed to get all of the regulators\n");
goto free_gpio;
}
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
new file mode 100644
index 000000000000..9cfc88134d03
--- /dev/null
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -0,0 +1,181 @@
+/* ADC MFD core driver for sunxi platforms
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/sun4i-gpadc.h>
+
+#define ARCH_SUN4I_A10 0
+#define ARCH_SUN5I_A13 1
+#define ARCH_SUN6I_A31 2
+
+static struct resource adc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_FIFO_DATA, "FIFO_DATA_PENDING"),
+ DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_TEMP_DATA, "TEMP_DATA_PENDING"),
+};
+
+static const struct regmap_irq sun4i_gpadc_regmap_irq[] = {
+ REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_FIFO_DATA, 0,
+ SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN),
+ REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_TEMP_DATA, 0,
+ SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN),
+};
+
+static const struct regmap_irq_chip sun4i_gpadc_regmap_irq_chip = {
+ .name = "sun4i_gpadc_irq_chip",
+ .status_base = SUN4I_GPADC_INT_FIFOS,
+ .ack_base = SUN4I_GPADC_INT_FIFOS,
+ .mask_base = SUN4I_GPADC_INT_FIFOC,
+ .init_ack_masked = true,
+ .mask_invert = true,
+ .irqs = sun4i_gpadc_regmap_irq,
+ .num_irqs = ARRAY_SIZE(sun4i_gpadc_regmap_irq),
+ .num_regs = 1,
+};
+
+static struct mfd_cell sun4i_gpadc_cells[] = {
+ {
+ .name = "sun4i-a10-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" }
+};
+
+static struct mfd_cell sun5i_gpadc_cells[] = {
+ {
+ .name = "sun5i-a13-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" },
+};
+
+static struct mfd_cell sun6i_gpadc_cells[] = {
+ {
+ .name = "sun6i-a31-gpadc-iio",
+ .resources = adc_resources,
+ .num_resources = ARRAY_SIZE(adc_resources),
+ },
+ { .name = "iio_hwmon" },
+};
+
+static const struct regmap_config sun4i_gpadc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct of_device_id sun4i_gpadc_of_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-ts",
+ .data = (void *)ARCH_SUN4I_A10,
+ }, {
+ .compatible = "allwinner,sun5i-a13-ts",
+ .data = (void *)ARCH_SUN5I_A13,
+ }, {
+ .compatible = "allwinner,sun6i-a31-ts",
+ .data = (void *)ARCH_SUN6I_A31,
+ }, { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_match);
+
+static int sun4i_gpadc_probe(struct platform_device *pdev)
+{
+ struct sun4i_gpadc_dev *dev;
+ struct resource *mem;
+ const struct of_device_id *of_id;
+ const struct mfd_cell *cells;
+ unsigned int irq, size;
+ int ret;
+
+ of_id = of_match_node(sun4i_gpadc_of_match, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ switch ((long)of_id->data) {
+ case ARCH_SUN4I_A10:
+ cells = sun4i_gpadc_cells;
+ size = ARRAY_SIZE(sun4i_gpadc_cells);
+ break;
+ case ARCH_SUN5I_A13:
+ cells = sun5i_gpadc_cells;
+ size = ARRAY_SIZE(sun5i_gpadc_cells);
+ break;
+ case ARCH_SUN6I_A31:
+ cells = sun6i_gpadc_cells;
+ size = ARRAY_SIZE(sun6i_gpadc_cells);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ dev->dev = &pdev->dev;
+ dev_set_drvdata(dev->dev, dev);
+
+ dev->regmap = devm_regmap_init_mmio(dev->dev, dev->base,
+ &sun4i_gpadc_regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ dev_err(&pdev->dev, "failed to init regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable all interrupts */
+ regmap_write(dev->regmap, SUN4I_GPADC_INT_FIFOC, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_regmap_add_irq_chip(&pdev->dev, dev->regmap, irq,
+ IRQF_ONESHOT, 0,
+ &sun4i_gpadc_regmap_irq_chip,
+ &dev->regmap_irqc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add irq chip: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(dev->dev, 0, cells, size, NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver sun4i_gpadc_driver = {
+ .driver = {
+ .name = "sun4i-gpadc",
+ .of_match_table = of_match_ptr(sun4i_gpadc_of_match),
+ },
+ .probe = sun4i_gpadc_probe,
+};
+
+module_platform_driver(sun4i_gpadc_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi platforms' GPADC MFD core driver");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 274bf39968aa..cc9e563f23aa 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -53,7 +53,7 @@ int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg)
EXPORT_SYMBOL_GPL(tc3589x_reg_read);
/**
- * tc3589x_reg_read() - write a single TC3589x register
+ * tc3589x_reg_write() - write a single TC3589x register
* @tc3589x: Device to write to
* @reg: Register to read
* @data: Value to write
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(tc3589x_block_write);
* @tc3589x: Device to write to
* @reg: Register to write
* @mask: Mask of bits to set
- * @values: Value to set
+ * @val: Value to set
*/
int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val)
{
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 9a4d8684dd32..f769c7d4e335 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -42,26 +42,6 @@ static struct resource pb_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_PB, "PB"),
};
-struct tps65217_irq {
- int mask;
- int interrupt;
-};
-
-static const struct tps65217_irq tps65217_irqs[] = {
- [TPS65217_IRQ_PB] = {
- .mask = TPS65217_INT_PBM,
- .interrupt = TPS65217_INT_PBI,
- },
- [TPS65217_IRQ_AC] = {
- .mask = TPS65217_INT_ACM,
- .interrupt = TPS65217_INT_ACI,
- },
- [TPS65217_IRQ_USB] = {
- .mask = TPS65217_INT_USBM,
- .interrupt = TPS65217_INT_USBI,
- },
-};
-
static void tps65217_irq_lock(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
@@ -74,37 +54,32 @@ static void tps65217_irq_sync_unlock(struct irq_data *data)
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
int ret;
- ret = tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
- TPS65217_PROTECT_NONE);
+ ret = tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+ tps->irq_mask, TPS65217_PROTECT_NONE);
if (ret != 0)
dev_err(tps->dev, "Failed to sync IRQ masks\n");
mutex_unlock(&tps->irq_lock);
}
-static inline const struct tps65217_irq *
-irq_to_tps65217_irq(struct tps65217 *tps, struct irq_data *data)
-{
- return &tps65217_irqs[data->hwirq];
-}
-
static void tps65217_irq_enable(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
- const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+ u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
- tps->irq_mask &= ~irq_data->mask;
+ tps->irq_mask &= ~mask;
}
static void tps65217_irq_disable(struct irq_data *data)
{
struct tps65217 *tps = irq_data_get_irq_chip_data(data);
- const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+ u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
- tps->irq_mask |= irq_data->mask;
+ tps->irq_mask |= mask;
}
static struct irq_chip tps65217_irq_chip = {
+ .name = "tps65217",
.irq_bus_lock = tps65217_irq_lock,
.irq_bus_sync_unlock = tps65217_irq_sync_unlock,
.irq_enable = tps65217_irq_enable,
@@ -149,8 +124,8 @@ static irqreturn_t tps65217_irq_thread(int irq, void *data)
return IRQ_NONE;
}
- for (i = 0; i < ARRAY_SIZE(tps65217_irqs); i++) {
- if (status & tps65217_irqs[i].interrupt) {
+ for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+ if (status & BIT(i)) {
handle_nested_irq(irq_find_mapping(tps->irq_domain, i));
handled = true;
}
@@ -188,10 +163,9 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
tps->irq = irq;
/* Mask all interrupt sources */
- tps->irq_mask = (TPS65217_INT_RESERVEDM | TPS65217_INT_PBM
- | TPS65217_INT_ACM | TPS65217_INT_USBM);
- tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
- TPS65217_PROTECT_NONE);
+ tps->irq_mask = TPS65217_INT_MASK;
+ tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+ TPS65217_INT_MASK, TPS65217_PROTECT_NONE);
tps->irq_domain = irq_domain_add_linear(tps->dev->of_node,
TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
@@ -209,6 +183,8 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
return ret;
}
+ enable_irq_wake(irq);
+
return 0;
}
@@ -424,6 +400,24 @@ static int tps65217_probe(struct i2c_client *client,
return 0;
}
+static int tps65217_remove(struct i2c_client *client)
+{
+ struct tps65217 *tps = i2c_get_clientdata(client);
+ unsigned int virq;
+ int i;
+
+ for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+ virq = irq_find_mapping(tps->irq_domain, i);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(tps->irq_domain);
+ tps->irq_domain = NULL;
+
+ return 0;
+}
+
static const struct i2c_device_id tps65217_id_table[] = {
{"tps65217", TPS65217},
{ /* sentinel */ }
@@ -437,6 +431,7 @@ static struct i2c_driver tps65217_driver = {
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
+ .remove = tps65217_remove,
};
static int __init tps65217_init(void)
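
The rework drops the per-IRQ lookup table in favour of deriving both the status bit and the mask bit directly from the hwirq number. A minimal sketch of that mapping (helper names are invented for illustration; TPS65217_INT_SHIFT and TPS65217_INT_MASK come from the header, which is not part of this hunk):

/* Illustration only: how the rework maps a hwirq to register bits. */
static inline u8 tps65217_irq_to_status_bit(unsigned long hwirq)
{
	/* Tested against the INT register contents in the IRQ thread. */
	return BIT(hwirq);
}

static inline u8 tps65217_irq_to_mask_bit(unsigned long hwirq)
{
	/* The mask bit is the status bit shifted up by TPS65217_INT_SHIFT;
	 * this is what irq_enable()/irq_disable() now compute inline. */
	return BIT(hwirq) << TPS65217_INT_SHIFT;
}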
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index ba610adbdbff..13834a0d2817 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -33,19 +33,17 @@
#define TPS65218_PASSWORD_REGS_UNLOCK 0x7D
-/**
- * tps65218_reg_read: Read a single tps65218 register.
- *
- * @tps: Device to read from.
- * @reg: Register to read.
- * @val: Contians the value
- */
-int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
- unsigned int *val)
-{
- return regmap_read(tps->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65218_reg_read);
+static const struct mfd_cell tps65218_cells[] = {
+ {
+ .name = "tps65218-pwrbutton",
+ .of_compatible = "ti,tps65218-pwrbutton",
+ },
+ {
+ .name = "tps65218-gpio",
+ .of_compatible = "ti,tps65218-gpio",
+ },
+ { .name = "tps65218-regulator", },
+};
/**
* tps65218_reg_write: Write a single tps65218 register.
@@ -93,7 +91,7 @@ static int tps65218_update_bits(struct tps65218 *tps, unsigned int reg,
int ret;
unsigned int data;
- ret = tps65218_reg_read(tps, reg, &data);
+ ret = regmap_read(tps->regmap, reg, &data);
if (ret) {
dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
return ret;
@@ -251,7 +249,7 @@ static int tps65218_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = tps65218_reg_read(tps, TPS65218_REG_CHIPID, &chipid);
+ ret = regmap_read(tps->regmap, TPS65218_REG_CHIPID, &chipid);
if (ret) {
dev_err(tps->dev, "Failed to read chipid: %d\n", ret);
return ret;
@@ -259,8 +257,10 @@ static int tps65218_probe(struct i2c_client *client,
tps->rev = chipid & TPS65218_CHIPID_REV_MASK;
- ret = of_platform_populate(client->dev.of_node, NULL, NULL,
- &client->dev);
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65218_cells,
+ ARRAY_SIZE(tps65218_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+
if (ret < 0)
goto err_irq;
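
Switching from of_platform_populate() to mfd_add_devices() lets each cell be matched to its DT child node via .of_compatible, and the regmap-irq domain passed as the last argument is used to remap any IRQ resources a cell declares. A hedged sketch of what such a cell could look like (TPS65218_INT_PB is a placeholder hwirq name, not defined by this patch):

/* Illustration, not part of this patch: a cell that needs an interrupt
 * can declare it as a resource; mfd_add_devices() translates the hwirq
 * through the domain passed in (here the regmap-irq domain), so the
 * child driver receives a ready-to-use virq. */
static const struct resource tps65218_pwrbutton_resources[] = {
	DEFINE_RES_IRQ_NAMED(TPS65218_INT_PB, "PB"),
};

static const struct mfd_cell tps65218_pwrbutton_cell = {
	.name		= "tps65218-pwrbutton",
	.of_compatible	= "ti,tps65218-pwrbutton",
	.resources	= tps65218_pwrbutton_resources,
	.num_resources	= ARRAY_SIZE(tps65218_pwrbutton_resources),
};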
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index a88cfa80dbc4..f33567bc428d 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -77,6 +77,23 @@ static struct regmap_irq_chip tps65912_irq_chip = {
.init_ack_masked = true,
};
+static const struct regmap_range tps65912_yes_ranges[] = {
+ regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
+};
+
+static const struct regmap_access_table tps65912_volatile_table = {
+ .yes_ranges = tps65912_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
+};
+
+const struct regmap_config tps65912_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65912_volatile_table,
+};
+EXPORT_SYMBOL_GPL(tps65912_regmap_config);
+
int tps65912_device_init(struct tps65912 *tps)
{
int ret;
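
Exporting tps65912_regmap_config lets the bus-specific front ends share one definition: with REGCACHE_RBTREE and only the interrupt/GPIO range marked volatile, everything else is served from the cache. A sketch of how an I2C front end might consume it, assuming the usual tps65912 field names and with error handling trimmed:

/* Sketch only: mirrors the bus-front-end pattern, not this patch's code. */
static int tps65912_i2c_probe_sketch(struct i2c_client *client)
{
	struct tps65912 *tps;

	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	tps->dev = &client->dev;
	tps->irq = client->irq;

	/* Reuse the exported config so the cache/volatile policy stays in
	 * one place for both the I2C and SPI interfaces. */
	tps->regmap = devm_regmap_init_i2c(client, &tps65912_regmap_config);
	if (IS_ERR(tps->regmap))
		return PTR_ERR(tps->regmap);

	return tps65912_device_init(tps);
}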
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index ab8b23b5bd22..853113d97c1e 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -244,752 +244,752 @@ const struct regmap_irq_chip wm5102_irq = {
};
static const struct reg_default wm5102_reg_default[] = {
- { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
- { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
- { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
- { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
- { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
- { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
- { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
- { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
- { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
- { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
- { 0x00000040, 0x0000 }, /* R64 - Wake control */
- { 0x00000041, 0x0000 }, /* R65 - Sequence control */
- { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
- { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
- { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
- { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
+ { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
+ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
+ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
+ { 0x00000040, 0x0000 }, /* R64 - Wake control */
+ { 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
{ 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
{ 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
{ 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
{ 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
{ 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
{ 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
- { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
- { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
- { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
- { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
- { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
- { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
- { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
- { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
- { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
+ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
+ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
+ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
{ 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
- { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
- { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
- { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
- { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
- { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
- { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
{ 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
- { 0x00000149, 0x0000 }, /* R329 - Output system clock */
- { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
- { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
- { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
- { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
- { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
- { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
- { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
+ { 0x00000149, 0x0000 }, /* R329 - Output system clock */
+ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
{ 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
- { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
- { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
- { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
- { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
- { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
+ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
+ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
+ { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
- { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
- { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
- { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
- { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
- { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
- { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
- { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
- { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
- { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
- { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
- { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
- { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
- { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
- { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
+ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
+ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
+ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
+ { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
- { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
- { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
- { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
- { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
- { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
- { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
+ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
+ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
+ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
+ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
+ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
{ 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
- { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
- { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
- { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
- { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
+ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
+ { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
{ 0x00000212, 0x0000 }, /* R530 - LDO1 Control 2 */
- { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
- { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
- { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
- { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
- { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
- { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
+ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
+ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
- { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
- { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
+ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
{ 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
{ 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
- { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
- { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
- { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
- { 0x00000300, 0x0000 }, /* R768 - Input Enables */
- { 0x00000308, 0x0000 }, /* R776 - Input Rate */
- { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
- { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
- { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
- { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
- { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
- { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
- { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
- { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
- { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
- { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
- { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
- { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
- { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
- { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
- { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
- { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
- { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
- { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
- { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
- { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
- { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
- { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
+ { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
+ { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
+ { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
+ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
+ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
+ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
+ { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
+ { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
+ { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
+ { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
+ { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
+ { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
+ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
{ 0x00000410, 0x6080 }, /* R1040 - Output Path Config 1L */
- { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
+ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
{ 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
- { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
- { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
- { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
+ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
{ 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
- { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
+ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
{ 0x00000418, 0xA080 }, /* R1048 - Output Path Config 2L */
- { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
+ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
{ 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
- { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
- { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
- { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
+ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
+ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
+ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
{ 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
- { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
+ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
{ 0x00000420, 0xA080 }, /* R1056 - Output Path Config 3L */
- { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
+ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
{ 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
- { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
+ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
{ 0x00000428, 0xE000 }, /* R1064 - Output Path Config 4L */
- { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
+ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
{ 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
- { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
- { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
+ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
+ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
{ 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
- { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
- { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
- { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
+ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
{ 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
- { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
- { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
+ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
+ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000440, 0x0FFF }, /* R1088 - DRE Enable */
{ 0x00000442, 0x3F0A }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0xDC1F }, /* R1090 - DRE Control 3 */
- { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
+ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x000B }, /* R1112 - Noise Gate Control */
- { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
- { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
- { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
- { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
- { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
- { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
- { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
- { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
- { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
- { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
- { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
- { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
- { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
- { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
- { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
- { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
- { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
- { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
- { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
- { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
- { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
- { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
- { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
- { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
- { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
- { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
- { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
- { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
- { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
- { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
- { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
- { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
- { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
- { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
- { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
- { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
- { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
- { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
- { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
- { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
- { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
- { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
- { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
- { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
- { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
- { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
- { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
- { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
- { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
- { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
- { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
- { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
- { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
- { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
- { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
- { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
- { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
- { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
- { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
- { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
- { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
- { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
- { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
- { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
- { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
- { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
- { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
- { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
- { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
- { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
- { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
- { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
- { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
- { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
- { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
- { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
- { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
- { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
- { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
- { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
- { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
- { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
- { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
- { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
- { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
- { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
- { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
- { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
- { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
- { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
- { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
- { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
- { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
- { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
- { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
- { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
- { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
- { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
- { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
- { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
- { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
- { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
- { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
- { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
- { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
- { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
- { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
- { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
- { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
- { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
- { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
- { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
- { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
- { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
- { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
- { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
- { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
- { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
- { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
- { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
- { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
- { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
- { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
- { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
- { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
- { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
- { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
- { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
- { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
- { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
- { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
- { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
- { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
- { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
- { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
- { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
- { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
- { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
- { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
- { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
- { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
- { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
- { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
- { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
- { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
- { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
- { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
- { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
- { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
- { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
- { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
- { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
- { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
- { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
- { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
- { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
- { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
- { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
- { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
- { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
- { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
- { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
- { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
- { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
- { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
- { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
- { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
- { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
- { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
- { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
- { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
- { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
- { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
- { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
- { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
- { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
- { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
- { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
- { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
- { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
- { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
- { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
- { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
- { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
- { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
- { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
- { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
- { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
- { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
- { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
- { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
- { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
- { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
- { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
- { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
- { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
- { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
- { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
- { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
- { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
- { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
- { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
- { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
- { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
- { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
- { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
- { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
- { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
- { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
- { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
- { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
- { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
- { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
- { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
- { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
- { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
- { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
- { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
- { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
- { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
- { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
- { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
- { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
- { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
- { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
- { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
- { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
- { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
- { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
- { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
- { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
- { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
- { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
- { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
- { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
- { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
- { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
- { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
- { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
- { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
- { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
- { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
- { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
- { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
- { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
- { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
- { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
- { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
- { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
- { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
- { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
- { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
- { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
- { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
- { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
- { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
- { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
- { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
- { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
- { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
- { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
- { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
- { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
- { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
- { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
- { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
- { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
- { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
- { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
- { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
- { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
- { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
- { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
- { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
- { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
- { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
- { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
- { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
- { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
- { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
- { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
- { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
- { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
- { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
- { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
- { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
- { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
- { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
- { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
- { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
- { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
- { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
- { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
- { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
- { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
- { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
- { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
- { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
- { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
- { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
- { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
- { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
- { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
- { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
- { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
- { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
- { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
- { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
- { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
- { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
- { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
- { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
- { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
- { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
- { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
- { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
- { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
- { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
- { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
- { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
- { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
- { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
- { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
- { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
- { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
- { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
- { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
- { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
- { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
- { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
- { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
- { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
- { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
- { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
- { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
- { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
- { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
- { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
- { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
- { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
- { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
- { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
- { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
- { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
- { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
- { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
- { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
- { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
- { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
- { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
- { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
- { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
- { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
- { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
- { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
- { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
- { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
- { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
- { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
- { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
- { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
- { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
- { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
- { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
- { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
- { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
- { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
- { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
- { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
- { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
- { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
- { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
- { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
- { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
- { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
- { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
- { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
- { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
- { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
- { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
- { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
- { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
- { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
- { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
- { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
- { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
- { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
- { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
- { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
- { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
- { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
- { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
- { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
- { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
- { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
- { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
- { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
- { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
- { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
- { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
- { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
- { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
- { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
- { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
- { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
- { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
- { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
- { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
- { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
- { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
- { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
- { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
- { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
- { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
- { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
- { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
- { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
- { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
- { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
- { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
- { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
- { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
- { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
- { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
- { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
- { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
- { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
- { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
- { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
- { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
- { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
- { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
- { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
- { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
- { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
- { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
- { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
- { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
- { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
- { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
- { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
- { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
- { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
- { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
- { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
- { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
- { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
- { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
- { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
- { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
- { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
- { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
- { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
- { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
- { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
- { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
+ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
+ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
+ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
+ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
+ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
+ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
+ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
+ { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
+ { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
+ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
+ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
+ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
+ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
+ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
+ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
+ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
+ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
+ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
+ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
+ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
+ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
+ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
+ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
+ { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
+ { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
+ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
+ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
+ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
+ { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
+ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
+ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
+ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
+ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
+ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
+ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
+ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
+ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
+ { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
+ { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
+ { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
+ { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
+ { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
+ { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
+ { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
+ { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
+ { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
+ { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
+ { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
+ { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
+ { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
+ { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
+ { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
+ { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
+ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
+ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
+ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
+ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
+ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
+ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
+ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
+ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
+ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
+ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
+ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
+ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
+ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
+ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
+ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
+ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
+ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
+ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
+ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
+ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
+ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
+ { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
+ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
+ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
+ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
+ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
+ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
+ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
+ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
+ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
+ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
+ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
+ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
+ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
+ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
+ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
+ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
+ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
+ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
+ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
+ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
+ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
+ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
+ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
+ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
+ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
+ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
+ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
+ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
+ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
+ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
+ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
+ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
+ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
+ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
+ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
+ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
+ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
+ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
+ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
+ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
+ { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
+ { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
+ { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
+ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
+ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
+ { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
+ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
+ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
+ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
+ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
+ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
+ { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
+ { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
+ { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
+ { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
+ { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
+ { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
+ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
+ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
+ { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
+ { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
+ { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
+ { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
+ { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
+ { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
+ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
+ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
+ { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
+ { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
+ { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
+ { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
+ { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
+ { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
+ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
+ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
+ { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
+ { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
+ { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
+ { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
+ { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
+ { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
+ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
+ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
+ { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
+ { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
+ { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
+ { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
+ { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
+ { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
+ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
+ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
+ { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
+ { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
+ { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
+ { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
+ { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
+ { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
+ { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
+ { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
+ { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
+ { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
+ { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
+ { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
+ { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
+ { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
+ { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
+ { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
+ { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
+ { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
+ { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
+ { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
+ { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
+ { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
+ { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
+ { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
+ { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
+ { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
+ { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
+ { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
+ { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
+ { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
+ { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
+ { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
+ { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
+ { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
+ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
+ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
+ { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
+ { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
+ { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
+ { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
+ { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
+ { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
+ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
+ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
+ { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
+ { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
+ { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
+ { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
+ { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
+ { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
+ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
+ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
+ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
+ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
+ { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
+ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
+ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
+ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
+ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
+ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
+ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
+ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
+ { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
+ { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
+ { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
+ { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
+ { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
+ { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
+ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
+ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
+ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
+ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
+ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
+ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
+ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
+ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
+ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
+ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
+ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
+ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
+ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
+ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
+ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
+ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
{ 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
- { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
- { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
- { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
- { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
- { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
- { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
- { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
- { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
- { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
- { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
- { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
- { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
- { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
- { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
- { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
- { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
+ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
+ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
+ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
+ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
+ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
+ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
+ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
+ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
+ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
+ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
+ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
+ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
+ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
{ 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
- { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
- { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
- { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
- { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
- { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
- { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
- { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
- { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
- { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
- { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
- { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
- { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
- { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
- { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
- { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
- { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
- { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
- { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
- { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
- { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
- { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
- { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
- { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
- { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
- { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
- { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
- { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
- { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
- { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
- { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
- { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
- { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
- { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
- { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
- { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
- { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
- { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
- { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
- { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
- { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
- { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
- { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
- { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
- { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
- { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
- { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
- { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
- { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
- { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
- { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
- { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
- { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
- { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
- { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
- { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
- { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
- { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
- { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
- { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
- { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
- { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
- { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
- { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
- { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
- { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
- { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
- { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
- { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
- { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
- { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
- { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
- { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
- { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
- { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
- { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
- { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
- { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
- { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
- { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
- { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
- { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
- { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
- { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
- { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
- { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
- { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
- { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
- { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
- { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
- { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
- { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
- { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
- { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
- { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
- { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
- { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
- { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
- { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
- { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
- { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
- { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
- { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
- { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
+ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
+ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
+ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
+ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
+ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
+ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
+ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
+ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
+ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
+ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
+ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
+ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
+ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
+ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
+ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
+ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
+ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
+ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
+ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
+ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
+ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
+ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
+ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
+ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
+ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
+ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
+ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
+ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
+ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
+ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
+ { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
+ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
+ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
+ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
+ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
+ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
+ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
+ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
+ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
+ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
+ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
+ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
+ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
+ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
+ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
+ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
+ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
+ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
+ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
+ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
+ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
+ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
+ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
+ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
+ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
+ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
+ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
+ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
+ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
+ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
+ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
+ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
+ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
+ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
+ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
+ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
+ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
+ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
+ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
+ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
+ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
+ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
+ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
+ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
+ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
+ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
+ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
+ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
+ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
+ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
+ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
+ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
+ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
+ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
+ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
+ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
+ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
+ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
+ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
+ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
+ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
+ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
+ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
+ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
+ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
+ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
+ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
+ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
+ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
+ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
+ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
+ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
{ 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
- { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
- { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
- { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
- { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
- { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
- { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
+ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
+ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
+ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
+ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
+ { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
+ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
+ { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
};
static bool wm5102_readable_register(struct device *dev, unsigned int reg)
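The block of { register, value } pairs above is a regmap reg_default table: it seeds the register cache with the codec's power-on values so the core can resync them after a reset without touching the hardware. A minimal sketch of how such a table is normally wired into a regmap_config, assuming the array is the driver's wm5102_reg_default[] and reusing the wm5102_readable_register() helper declared just above; the real wm5102 config lives elsewhere in the driver and is not part of this hunk:

    /* Sketch only -- field values are illustrative, not taken from the patch. */
    static const struct regmap_config wm5102_regmap_sketch = {
        .reg_bits         = 32,                            /* assumed register width */
        .val_bits         = 16,                            /* assumed value width */
        .reg_defaults     = wm5102_reg_default,            /* the table above (name assumed) */
        .num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
        .readable_reg     = wm5102_readable_register,
        .cache_type       = REGCACHE_RBTREE,
    };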
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 8588dbad3301..953d0790ffd5 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -406,8 +406,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err;
}
- ret = regulator_bulk_enable(wm8994->num_supplies,
- wm8994->supplies);
+ ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
goto err_regulator_free;
@@ -612,8 +611,7 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
{
pm_runtime_disable(wm8994->dev);
wm8994_irq_exit(wm8994);
- regulator_bulk_disable(wm8994->num_supplies,
- wm8994->supplies);
+ regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
mfd_remove_devices(wm8994->dev);
}
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b60971..1b35e33d2434 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,18 +9,119 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
-#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/mount.h>
#include "cxl.h"
+/*
+ * Since we want to track memory mappings to be able to force-unmap
+ * when the AFU is no longer reachable, we need an inode. For devices
+ * opened through the cxl user API, this is not a problem, but a
+ * userland process can also get a cxl fd through the cxl_get_fd()
+ * API, which is used by the cxlflash driver.
+ *
+ * Therefore we implement our own simple pseudo-filesystem and inode
+ * allocator. We don't use the anonymous inode, as we need the
+ * meta-data associated with it (address_space) and it is shared by
+ * other drivers/processes, so it could lead to cxl unmapping VMAs
+ * from random processes.
+ */
+
+#define CXL_PSEUDO_FS_MAGIC 0x1697697f
+
+static int cxl_fs_cnt;
+static struct vfsmount *cxl_vfs_mount;
+
+static const struct dentry_operations cxl_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
+ CXL_PSEUDO_FS_MAGIC);
+}
+
+static struct file_system_type cxl_fs_type = {
+ .name = "cxl",
+ .owner = THIS_MODULE,
+ .mount = cxl_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+
+void cxl_release_mapping(struct cxl_context *ctx)
+{
+ if (ctx->kernelapi && ctx->mapping)
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+}
+
+static struct file *cxl_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ struct qstr this;
+ struct path path;
+ struct file *file;
+ struct inode *inode = NULL;
+ int rc;
+
+ /* strongly inspired by anon_inode_getfile() */
+
+ if (fops->owner && !try_module_get(fops->owner))
+ return ERR_PTR(-ENOENT);
+
+ rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
+ file = ERR_PTR(rc);
+ goto err_module;
+ }
+
+ inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err_fs;
+ }
+
+ file = ERR_PTR(-ENOMEM);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = 0;
+ path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
+ if (!path.dentry)
+ goto err_inode;
+
+ path.mnt = mntget(cxl_vfs_mount);
+ d_instantiate(path.dentry, inode);
+
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ if (IS_ERR(file))
+ goto err_dput;
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+ file->private_data = priv;
+
+ return file;
+
+err_dput:
+ path_put(&path);
+err_inode:
+ iput(inode);
+err_fs:
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+err_module:
+ module_put(fops->owner);
+ return file;
+}
+
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
- struct address_space *mapping;
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
@@ -30,38 +131,20 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
return ERR_CAST(afu);
ctx = cxl_context_alloc();
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- goto err_dev;
- }
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
ctx->kernelapi = true;
- /*
- * Make our own address space since we won't have one from the
- * filesystem like the user api has, and even if we do associate a file
- * with this context we don't want to use the global anonymous inode's
- * address space as that can invalidate unrelated users:
- */
- mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
- if (!mapping) {
- rc = -ENOMEM;
- goto err_ctx;
- }
- address_space_init_once(mapping);
-
/* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false, mapping);
+ rc = cxl_context_init(ctx, afu, false);
if (rc)
- goto err_mapping;
+ goto err_ctx;
return ctx;
-err_mapping:
- kfree(mapping);
err_ctx:
kfree(ctx);
-err_dev:
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -340,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
{
struct file *file;
int rc, flags, fdtmp;
+ char *name = NULL;
+
+ /* only allow one per context */
+ if (ctx->mapping)
+ return ERR_PTR(-EEXIST);
flags = O_RDWR | O_CLOEXEC;
@@ -363,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
} else /* use default ops */
fops = (struct file_operations *)&afu_fops;
- file = anon_inode_getfile("cxl", fops, ctx, flags);
+ name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
+ file = cxl_getfile(name, fops, ctx, flags);
+ kfree(name);
if (IS_ERR(file))
goto err_fd;
- file->f_mapping = ctx->mapping;
-
+ cxl_context_set_mapping(ctx, file->f_mapping);
*fd = fdtmp;
return file;
@@ -541,7 +630,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (remaining > 0) {
new_ctx = cxl_dev_context_init(pdev);
- if (!new_ctx) {
+ if (IS_ERR(new_ctx)) {
pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
return -ENOSPC;
}
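With this change cxl_dev_context_init() reports failure as ERR_PTR(-ENOMEM) rather than NULL, and cxl_get_fd() refuses a second fd for a context that already has a mapping, so in-kernel users must check with IS_ERR() as the pci.c and phb.c hunks below now do. A minimal caller sketch under those assumptions (the pdev, the error paths and the use of the default afu_fops are illustrative, not taken from the patch):

    struct cxl_context *ctx;
    struct file *file;
    int fd;

    ctx = cxl_dev_context_init(pdev);          /* hypothetical struct pci_dev */
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);

    file = cxl_get_fd(ctx, NULL, &fd);         /* NULL fops => default afu_fops */
    if (IS_ERR(file)) {
        cxl_release_context(ctx);
        return PTR_ERR(file);
    }
    /* file->f_mapping now refers to an inode on the private cxl pseudo-fs */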
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 5e506c19108a..3907387b6d15 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
@@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->master = master;
ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
- ctx->mapping = mapping;
+ ctx->mapping = NULL;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -114,16 +113,23 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping)
+{
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = mapping;
+ mutex_unlock(&ctx->mapping_lock);
+}
+
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct cxl_context *ctx = vma->vm_file->private_data;
- unsigned long address = (unsigned long)vmf->virtual_address;
u64 area, offset;
offset = vmf->pgoff << PAGE_SHIFT;
pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
- __func__, ctx->pe, address, offset);
+ __func__, ctx->pe, vmf->address, offset);
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
area = ctx->afu->psn_phys;
@@ -155,7 +161,7 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
mutex_unlock(&ctx->status_mutex);
@@ -300,8 +306,6 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
- if (ctx->kernelapi)
- kfree(ctx->mapping);
kfree(ctx->irq_bitmap);
@@ -313,6 +317,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
void cxl_context_free(struct cxl_context *ctx)
{
+ if (ctx->kernelapi && ctx->mapping)
+ cxl_release_mapping(ctx);
mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
mutex_unlock(&ctx->afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a144073593fa..b24d76723fb0 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
@@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
extern struct pci_driver cxl_pci_driver;
extern struct platform_driver cxl_of_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index ec7b8a017439..9c06ac8fa5ac 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val)
out_be64((u64 __iomem *)data, val);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
+ "0x%016llx\n");
static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
- return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
+ return debugfs_create_file_unsafe(name, mode, parent,
+ (void __force *)value, &fops_io_x64);
}
void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
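DEFINE_DEBUGFS_ATTRIBUTE() generates an attribute whose read/write wrappers take debugfs' own file-lifetime reference, so it is meant to be paired with debugfs_create_file_unsafe(), as the hunk above does, rather than with debugfs_create_file(), which would stack the full removal-proxy protection on top. A generic sketch of the same pattern for a hypothetical memory-mapped register (all names invented for illustration):

    static int foo_reg_get(void *data, u64 *val)
    {
        *val = readl((void __iomem *)data);    /* hypothetical 32-bit MMIO register */
        return 0;
    }

    static int foo_reg_set(void *data, u64 val)
    {
        writel(val, (void __iomem *)data);
        return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(fops_foo_reg, foo_reg_get, foo_reg_set, "0x%08llx\n");

    /* registered with the _unsafe variant, mirroring the cxl change above */
    debugfs_create_file_unsafe("foo_reg", 0600, parent, regbase, &fops_foo_reg);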
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc5fa0a..859959f19f10 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
+ rc = cxl_context_init(ctx, afu, master);
+ if (rc)
goto err_put_afu;
+ cxl_context_set_mapping(ctx, inode->i_mapping);
+
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
cxl_ctx_get();
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd6ed91..e04bc4ddfd74 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work)
afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
return;
- if (afu_guest->handle_err == true)
+ if (afu_guest->handle_err)
schedule_delayed_work(&afu_guest->work_err,
msecs_to_jiffies(3000));
}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index dec60f58a767..1a402bbed687 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
+ ctx->pending_afu_err = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74ccc98..09505f432eda 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -54,7 +53,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- };
+ }
if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
/*
@@ -167,7 +166,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
cpu_relax();
}
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- };
+ }
end = local_clock();
pr_devel("PSL purged in %lld ns\n", end - start);
@@ -931,9 +930,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
struct cxl_afu *afu = data;
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
+ u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
+ int ph, ret;
+
+ /* check if eeh kicked in while the interrupt was in flight */
+ if (unlikely(phreg == ~0ULL)) {
+ dev_warn(&afu->dev,
+ "Ignoring slice interrupt(%d) due to fenced card",
+ irq);
+ return IRQ_HANDLED;
+ }
+ /* Mask the pe-handle from register value */
+ ph = phreg & 0xffff;
if ((ret = native_get_irq_info(afu, &irq_info))) {
WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
return fail_psl_irq(afu, &irq_info);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index e96be9ca4e60..80a87ab25b83 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
goto err;
ctx = cxl_dev_context_init(afu_dev);
- if (!ctx)
+ if (IS_ERR(ctx))
goto err;
afu_dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
index 0935d44c1770..6ec69ada19f4 100644
--- a/drivers/misc/cxl/phb.c
+++ b/drivers/misc/cxl/phb.c
@@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu
* in the virtual phb, we'll need a default context to attach them to.
*/
ctx = cxl_dev_context_init(dev);
- if (!ctx)
+ if (IS_ERR(ctx))
return false;
dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 520f58439080..e05c3245930a 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -76,7 +76,7 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include "ibmasm.h"
#include "remote.h"
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 6b3bf9ab051d..c5a456b0a564 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -170,7 +170,7 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
ibmasm_unregister_uart(sp);
dbg("Sending OS down message\n");
if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
- err("failed to get repsonse to 'Send OS State' command\n");
+ err("failed to get response to 'Send OS State' command\n");
dbg("Disabling heartbeats\n");
ibmasm_heartbeat_exit(sp);
dbg("Disabling interrupts\n");
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 18e05ca7584f..3600c9993a98 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
+ if (!cldev->bus->hbm_f_os_supported)
+ return;
+
ret = mei_cldev_enable(cldev);
if (ret)
return;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0037153c80a6..2d9c5dd06e42 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -450,7 +450,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev)
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
/**
- * mei_cldev_enable_device - enable me client device
+ * mei_cldev_enable - enable me client device
* create connection with me client
*
* @cldev: me client device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 391936c1aa04..b0395601c6ae 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1541,7 +1541,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
if (rets < 0)
- return rets;
+ goto err;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
@@ -1575,11 +1575,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->buf.size, cb->buf_idx);
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
- if (rets) {
- cl->status = rets;
- list_move_tail(&cb->list, &cmpl_list->list);
- return rets;
- }
+ if (rets)
+ goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
@@ -1587,14 +1584,21 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->completed = mei_hdr.msg_complete == 1;
if (first_chunk) {
- if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
- return -EIO;
+ if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
+ rets = -EIO;
+ goto err;
+ }
}
if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
+
+err:
+ cl->status = rets;
+ list_move_tail(&cb->list, &cmpl_list->list);
+ return rets;
}
/**
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c6c051b52f55..c6217a4993ad 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
dev->hbm_f_ev_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
dev->hbm_f_fa_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
+ dev->hbm_f_os_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index dd7f15a65eed..25b4a1ba522d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* Fixed Address Client Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
+
+ /* OS ver message Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
+ dev->hbm_f_os_supported = 1;
}
/**
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 9daf3f9aed25..e1e4d47d4d7d 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -76,6 +76,12 @@
#define HBM_MINOR_VERSION_FA 0
#define HBM_MAJOR_VERSION_FA 2
+/*
+ * MEI version with OS ver message support
+ */
+#define HBM_MINOR_VERSION_OS 0
+#define HBM_MAJOR_VERSION_OS 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 699693cd8c59..8dadb98662a9 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_ev_supported : hbm feature event notification
* @hbm_f_fa_supported : hbm feature fixed address client
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
+ * @hbm_f_os_supported : hbm feature support OS ver message
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -487,6 +488,7 @@ struct mei_device {
unsigned int hbm_f_ev_supported:1;
unsigned int hbm_f_fa_supported:1;
unsigned int hbm_f_ie_supported:1;
+ unsigned int hbm_f_os_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 33741ad4a74a..af2e077da4b8 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -932,7 +932,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long paddr, vaddr;
unsigned long expires;
- vaddr = (unsigned long)vmf->virtual_address;
+ vaddr = vmf->address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
vma, vaddr, GSEG_BASE(vaddr));
STAT(nopfn);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f84b53d6ce50..b33ab8ce47ab 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -19,12 +19,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <soc/at91/atmel-secumod.h>
#define SRAM_GRANULARITY 32
@@ -334,12 +339,33 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
return ret;
}
+static int atmel_securam_wait(void)
+{
+ struct regmap *regmap;
+ u32 val;
+
+ regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
+ if (IS_ERR(regmap))
+ return -ENODEV;
+
+ return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
+ val & AT91_SECUMOD_RAMRDY_READY,
+ 10000, 500000);
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
+ {}
+};
+
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
struct resource *res;
size_t size;
int ret;
+ int (*init_func)(void);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
@@ -384,6 +410,13 @@ static int sram_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sram);
+ init_func = of_device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func();
+ if (ret)
+ return ret;
+ }
+
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
@@ -405,17 +438,10 @@ static int sram_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sram_dt_ids[] = {
- { .compatible = "mmio-sram" },
- {}
-};
-#endif
-
static struct platform_driver sram_driver = {
.driver = {
.name = "sram",
- .of_match_table = of_match_ptr(sram_dt_ids),
+ .of_match_table = sram_dt_ids,
},
.probe = sram_probe,
.remove = sram_remove,
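
The sram.c change above does two things: it stores a per-compatible init hook in of_device_id.data and fetches it with of_device_get_match_data(), and that hook polls a syscon register with regmap_read_poll_timeout() until the secure RAM reports ready. A minimal sketch of the same pattern, assuming a made-up compatible string and register layout ("vendor,example-secumod", EXAMPLE_READY_*):

#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define EXAMPLE_READY_REG	0x14
#define EXAMPLE_READY_BIT	0x1

static int example_wait_ready(void)
{
	struct regmap *map;
	u32 val;

	map = syscon_regmap_lookup_by_compatible("vendor,example-secumod");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* poll every 10ms, give up after 500ms */
	return regmap_read_poll_timeout(map, EXAMPLE_READY_REG, val,
					val & EXAMPLE_READY_BIT,
					10000, 500000);
}

static int example_probe(struct platform_device *pdev)
{
	int (*init)(void) = of_device_get_match_data(&pdev->dev);

	return init ? init() : 0;
}
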
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index bab3f07b1117..cb1698f268f1 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -43,7 +43,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "queue.h"
#include "block.h"
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 543eadd230e5..1076b9d89df3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -496,8 +496,7 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
* Returns enum mmc_blk_status after checking errors.
*/
static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
- struct mmc_request *mrq,
- struct mmc_async_req *next_req)
+ struct mmc_request *mrq)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
@@ -507,7 +506,7 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
- context_info->is_waiting_last_req = false;
+
if (context_info->is_done_rcv) {
context_info->is_done_rcv = false;
cmd = mrq->cmd;
@@ -527,10 +526,9 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
__mmc_start_request(host, mrq);
continue; /* wait for done/new event again */
}
- } else if (context_info->is_new_req) {
- if (!next_req)
- return MMC_BLK_NEW_REQUEST;
}
+
+ return MMC_BLK_NEW_REQUEST;
}
mmc_retune_release(host);
return status;
@@ -660,7 +658,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq);
if (host->areq) {
- status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+ status = mmc_wait_for_data_req_done(host, host->areq->mrq);
if (status == MMC_BLK_NEW_REQUEST) {
if (ret_stat)
*ret_stat = status;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c3455b040..e6ea8503f40c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
} while (busy);
- if (host->ops->card_busy && send_status)
- return mmc_switch_status(card);
-
return 0;
}
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /* Switch to new timing before poll and check switch status. */
- if (timing)
- mmc_set_timing(host, timing);
-
/* If SPI, or if HW busy detection was used above, then we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
- mmc_host_is_spi(host)) {
- if (send_status)
- err = mmc_switch_status(card);
+ mmc_host_is_spi(host))
goto out_tim;
- }
/* Let's try to poll to find out when the command is completed. */
err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ if (err)
+ goto out;
out_tim:
- if (err && timing)
- mmc_set_timing(host, old_timing);
+ /* Switch to the new timing before checking the switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
+
+ if (send_status) {
+ err = mmc_switch_status(card);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
out:
mmc_retune_release(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index deb90c2ff6b4..a614f37faf27 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -223,6 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
+ u32 *raw_ssr;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +232,21 @@ static int mmc_read_ssr(struct mmc_card *card)
return 0;
}
- if (mmc_app_sd_status(card, card->raw_ssr)) {
+ raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+ if (!raw_ssr)
+ return -ENOMEM;
+
+ if (mmc_app_sd_status(card, raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
+ kfree(raw_ssr);
return 0;
}
for (i = 0; i < 16; i++)
- card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+ card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+ kfree(raw_ssr);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
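
The sd.c hunk above reads the SD status into a kmalloc()'d bounce buffer instead of transferring straight into the long-lived card structure (a kmalloc()'d buffer meets the alignment the host's DMA may require, unlike a field embedded in a larger structure), then byte-swaps the result into place. A small sketch of the same pattern, with hypothetical names (example_card, example_dma_read):

#include <linux/slab.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_card {
	u32 raw_status[16];
};

/* provided elsewhere; fills buf[] with 16 big-endian words via DMA */
int example_dma_read(struct example_card *card, u32 *buf);

static int example_read_status(struct example_card *card)
{
	u32 *raw;
	int i, err;

	raw = kmalloc(sizeof(card->raw_status), GFP_KERNEL);	/* DMA-safe bounce buffer */
	if (!raw)
		return -ENOMEM;

	err = example_dma_read(card, raw);
	if (!err)
		for (i = 0; i < 16; i++)
			card->raw_status[i] = be32_to_cpu(raw[i]);

	kfree(raw);
	return err;
}
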
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index dca5518b0139..590a8a4522be 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -49,7 +49,7 @@
#include <asm/types.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760c041e..09739352834c 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_request *mrq;
- struct mmc_command *cmd = host->cmd;
+ struct mmc_command *cmd;
u32 irq_en, status, raw_status;
irqreturn_t ret = IRQ_HANDLED;
if (WARN_ON(!host))
return IRQ_NONE;
+ cmd = host->cmd;
+
mrq = host->mrq;
if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
int ret = IRQ_HANDLED;
if (WARN_ON(!mrq))
- ret = IRQ_NONE;
+ return IRQ_NONE;
if (WARN_ON(!cmd))
- ret = IRQ_NONE;
+ return IRQ_NONE;
data = cmd->data;
if (data) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..c8b8ac66ff7e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695cc09c..278a5a435ab7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
list_for_each_entry(child, &device->children, node)
- acpi_device_fix_up_power(child);
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 1501cfdac473..4b0ecb981842 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -262,6 +262,7 @@ disable_clk:
}
static const struct of_device_id sdhci_cdns_match[] = {
+ { .compatible = "socionext,uniphier-sd4hc" },
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 111991e5b9a0..23909804ffb8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1576,6 +1576,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
u8 ctrl;
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -2938,22 +2941,24 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
- /* Force clock and power re-program */
- host->pwr = 0;
- host->clock = 0;
- mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
- mmc->ops->set_ios(mmc, &mmc->ios);
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
- if ((host_flags & SDHCI_PV_ENABLED) &&
- !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
- spin_lock_irqsave(&host->lock, flags);
- sdhci_enable_preset_value(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
- }
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
- if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
- mmc->ops->hs400_enhanced_strobe)
- mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+ }
spin_lock_irqsave(&host->lock, flags);
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 377947580203..283ff7e17a0f 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
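
Why the bcm47xxpart.c hunk switches from rounddown() to roundup() minus one block: when offset + trx->length lands exactly on a block boundary, rounddown() leaves the offset on that boundary and the loop's own += blocksize then skips the block that starts right after the TRX; rounding up and backing off one block makes both the aligned and unaligned cases resume at the right place. A small worked example (blocksize value chosen for illustration):

#include <linux/kernel.h>

static void example_trx_skip(void)
{
	const u32 blocksize = 0x10000;
	u32 end = 0x40000;	/* offset + trx->length, already block aligned */

	u32 old_off = rounddown(end, blocksize);		/* 0x40000 */
	u32 new_off = roundup(end, blocksize) - blocksize;	/* 0x30000 */

	/*
	 * After the loop adds blocksize: the old code resumes at 0x50000 and
	 * skips the block at 0x40000, the new code resumes at 0x40000.
	 * For unaligned ends the two expressions are equal.
	 */
	(void)old_off;
	(void)new_off;
}
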
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 1c65c15b31a1..514be04c0b6c 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -296,16 +296,30 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
dev_err(dev, "can't request region for resource %pR\n", res);
return -EBUSY;
}
- b47s->window = ioremap_cache(res->start, resource_size(res));
- if (!b47s->window) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- return -ENOMEM;
- }
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
b47s->cc_read = bcm47xxsflash_bcma_cc_read;
b47s->cc_write = bcm47xxsflash_bcma_cc_write;
+ /*
+ * On old MIPS devices cache was magically invalidated when needed,
+ * allowing us to use cached access and gain some performance. Trying
+ * the same on ARM based BCM53573 results in flash corruptions, we need
+ * to use uncached access for it.
+ *
+ * It may be arch specific, but right now there is only 1 ARM SoC using
+ * this driver, so let's follow Broadcom's reference code and check
+ * ChipCommon revision.
+ */
+ if (b47s->bcma_cc->core->id.rev == 54)
+ b47s->window = ioremap_nocache(res->start, resource_size(res));
+ else
+ b47s->window = ioremap_cache(res->start, resource_size(res));
+ if (!b47s->window) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ return -ENOMEM;
+ }
+
switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
b47s->type = BCM47XXSFLASH_TYPE_ST;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 220f9200fa52..cadea0620cd0 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -82,7 +82,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ptrace.h>
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index a70eb83e68f1..8087c36dc693 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -30,7 +30,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9fb3b0dcdac2..664d206a4cbe 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -70,7 +70,7 @@
#include <linux/hdreg.h>
#include <linux/vmalloc.h>
#include <linux/blkpg.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/mtd/ftl.h>
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index b66b541877f0..8db740d6eb08 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -34,7 +34,7 @@
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
#include <linux/mtd/nand.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/io.h>
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 1388c8d7f309..8d6bb189ea8e 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -27,7 +27,7 @@
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 093edd51bdc7..9b1c13aa9f20 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -227,7 +227,7 @@ static void sc520cdp_setup_par(void)
static int __init init_sc520cdp(void)
{
- int i, devices_found = 0;
+ int i, j, devices_found = 0;
#ifdef REPROGRAM_PAR
/* reprogram PAR registers so flash appears at the desired addresses */
@@ -243,6 +243,12 @@ static int __init init_sc520cdp(void)
if (!sc520cdp_map[i].virt) {
printk("Failed to ioremap_nocache\n");
+ for (j = 0; j < i; j++) {
+ if (mymtd[j]) {
+ map_destroy(mymtd[j]);
+ iounmap(sc520cdp_map[j].virt);
+ }
+ }
return -EIO;
}
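
The sc520cdp.c hunk adds unwinding for a partial failure: when the i-th ioremap fails, everything set up for indices 0..i-1 is torn down before returning. A generic sketch of that error-unwind loop (structure and function names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

struct example_map {
	phys_addr_t phys;
	unsigned long size;
	void __iomem *virt;
};

static int example_map_all(struct example_map *maps, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		maps[i].virt = ioremap_nocache(maps[i].phys, maps[i].size);
		if (!maps[i].virt) {
			/* undo the mappings that already succeeded */
			for (j = 0; j < i; j++)
				iounmap(maps[j].virt);
			return -EIO;
		}
	}
	return 0;
}
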
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index d459aca07881..414956eca0c9 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -16,7 +16,7 @@
#include <linux/of_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 8d58acf33021..df8a5ef334c0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -31,7 +31,7 @@
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "mtdcore.h"
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 2a47a3f0e730..ce5ccc573a9c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -37,7 +37,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "mtdcore.h"
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index d46e4adf6d2b..052772f7caef 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -46,8 +46,7 @@
#include "mtdcore.h"
-static struct backing_dev_info mtd_bdi = {
-};
+static struct backing_dev_info *mtd_bdi;
#ifdef CONFIG_PM_SLEEP
@@ -500,7 +499,7 @@ int add_mtd_device(struct mtd_info *mtd)
if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
return -EEXIST;
- mtd->backing_dev_info = &mtd_bdi;
+ mtd->backing_dev_info = mtd_bdi;
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
@@ -1274,8 +1273,8 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
- int section = 0, ret;
+ struct mtd_oob_region oobregion;
+ int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
@@ -1283,7 +1282,7 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
while (!ret) {
int cnt;
- cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ cnt = min_t(int, nbytes, oobregion.length);
memcpy(buf, oobbuf + oobregion.offset, cnt);
buf += cnt;
nbytes -= cnt;
@@ -1317,8 +1316,8 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
- int section = 0, ret;
+ struct mtd_oob_region oobregion;
+ int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
@@ -1326,7 +1325,7 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
while (!ret) {
int cnt;
- cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ cnt = min_t(int, nbytes, oobregion.length);
memcpy(oobbuf + oobregion.offset, buf, cnt);
buf += cnt;
nbytes -= cnt;
@@ -1354,7 +1353,7 @@ static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion))
{
- struct mtd_oob_region oobregion = { };
+ struct mtd_oob_region oobregion;
int section = 0, ret, nbytes = 0;
while (1) {
@@ -1771,18 +1770,20 @@ static const struct file_operations mtd_proc_ops = {
/*====================================================================*/
/* Init code */
-static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+static struct backing_dev_info * __init mtd_bdi_init(char *name)
{
+ struct backing_dev_info *bdi;
int ret;
- ret = bdi_init(bdi);
- if (!ret)
- ret = bdi_register(bdi, NULL, "%s", name);
+ bdi = kzalloc(sizeof(*bdi), GFP_KERNEL);
+ if (!bdi)
+ return ERR_PTR(-ENOMEM);
+ ret = bdi_setup_and_register(bdi, name);
if (ret)
- bdi_destroy(bdi);
+ kfree(bdi);
- return ret;
+ return ret ? ERR_PTR(ret) : bdi;
}
static struct proc_dir_entry *proc_mtd;
@@ -1795,9 +1796,11 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
- ret = mtd_bdi_init(&mtd_bdi, "mtd");
- if (ret)
+ mtd_bdi = mtd_bdi_init("mtd");
+ if (IS_ERR(mtd_bdi)) {
+ ret = PTR_ERR(mtd_bdi);
goto err_bdi;
+ }
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
@@ -1810,6 +1813,8 @@ static int __init init_mtd(void)
out_procfs:
if (proc_mtd)
remove_proc_entry("mtd", NULL);
+ bdi_destroy(mtd_bdi);
+ kfree(mtd_bdi);
err_bdi:
class_unregister(&mtd_class);
err_reg:
@@ -1823,7 +1828,8 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
- bdi_destroy(&mtd_bdi);
+ bdi_destroy(mtd_bdi);
+ kfree(mtd_bdi);
idr_destroy(&mtd_idr);
}
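
The reworked mtd_bdi_init() above returns either a valid pointer or an errno encoded with ERR_PTR(), and init_mtd() decodes it with IS_ERR()/PTR_ERR(). A minimal sketch of that idiom, with an illustrative context structure:

#include <linux/err.h>
#include <linux/slab.h>

struct example_ctx {
	int dummy;
};

static struct example_ctx *example_alloc(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return ctx;
}

static int example_init(void)
{
	struct example_ctx *ctx = example_alloc();

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* recover the negative errno */
	/* ... use ctx ... */
	kfree(ctx);
	return 0;
}
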
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index cb06bdd21a1b..c40e2c951758 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -587,7 +587,7 @@ retry:
ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
erase.state == MTD_ERASE_FAILED);
if (ret) {
- dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
+ dev_err(d->dev, "Interrupted erase block %#llx erasure on %s\n",
erase.addr, mtd->name);
return -EINTR;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7b7a887b4709..9ce5dcb4abd0 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -179,15 +179,6 @@ config MTD_NAND_S3C2410_DEBUG
help
Enable debugging of the S3C NAND driver
-config MTD_NAND_S3C2410_HWECC
- bool "Samsung S3C NAND Hardware ECC"
- depends on MTD_NAND_S3C2410
- help
- Enable the use of the controller's internal ECC generator when
- using NAND. Early versions of the chips have had problems with
- incorrect ECC generation, and if using these, the default of
- software ECC is preferable.
-
config MTD_NAND_NDFC
tristate "NDFC NanD Flash Controller"
depends on 4xx
@@ -205,6 +196,13 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
+config MTD_NAND_TANGO
+ tristate "NAND Flash support for Tango chips"
+ depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_DMA
+ help
+ Enables the NAND Flash controller on Tango chips.
+
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
depends on HAS_IOMEM
@@ -426,6 +424,12 @@ config MTD_NAND_ORION
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
+config MTD_NAND_OXNAS
+ tristate "NAND Flash support for Oxford Semiconductor SoC"
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller on Oxford Semiconductor SoCs.
+
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
depends on FSL_SOC
@@ -537,7 +541,7 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- tristate "Support for NAND on Lantiq XWAY SoC"
+ bool "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index cafde6f3d957..19a66e404d5b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_TANGO) += tango_nand.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78e12cc8bac2..5d6c26f3cf7f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -234,10 +234,9 @@ static int ams_delta_init(struct platform_device *pdev)
goto out_gpio;
/* Scan to find existence of the device */
- if (nand_scan(ams_delta_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(ams_delta_mtd, 1);
+ if (err)
goto out_mtd;
- }
/* Register the partitions */
mtd_device_register(ams_delta_mtd, partition_info,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 68b9160108c9..9ebd5ecefea6 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -2267,10 +2267,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
dev_info(host->dev, "No DMA support for NAND access.\n");
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_scan_ident;
- }
if (host->board.on_flash_bbt || on_flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
@@ -2304,10 +2303,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_scan_tail;
- }
mtd->name = "atmel_nand";
res = mtd_device_register(mtd, host->board.parts,
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 9d2424bfdbf5..42ebd73f821d 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -2209,8 +2209,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- if (nand_scan_ident(mtd, 1, NULL))
- return -ENXIO;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
chip->options |= NAND_NO_SUBPAGE_WRITE;
/*
@@ -2234,8 +2235,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
if (ret)
return ret;
- if (nand_scan_tail(mtd))
- return -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
return mtd_device_register(mtd, NULL, 0);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 0b0c93702abb..d40c32d311d8 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -725,10 +725,9 @@ static int cafe_nand_probe(struct pci_dev *pdev,
usedma = 0;
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, 2, NULL);
+ if (err)
goto out_irq;
- }
cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
2112 + sizeof(struct nand_buffers) +
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 49133783ca53..226ac0bcafc6 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -195,9 +195,9 @@ static int __init cmx270_init(void)
this->write_buf = cmx270_write_buf;
/* Scan to find existence of the device */
- if (nand_scan (cmx270_nand_mtd, 1)) {
+ ret = nand_scan(cmx270_nand_mtd, 1);
+ if (ret) {
pr_notice("No NAND device\n");
- ret = -ENXIO;
goto err_scan;
}
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index a65e4e0f57a1..594b28684138 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -242,10 +242,9 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Scan to find existence of the device */
- if (nand_scan(new_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(new_mtd, 1);
+ if (err)
goto out_free;
- }
cs553x_mtd[cs] = new_mtd;
goto out;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0476ae8776d9..73b9d4e2dca0 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -21,7 +21,6 @@
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
-#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -182,9 +181,6 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
int i;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
for (i = 0; i < denali->max_banks; i++)
iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
denali->flash_reg + INTR_STATUS(i));
@@ -234,9 +230,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
en_lo = CEIL_DIV(Trp[mode], CLK_X);
en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
@@ -403,7 +396,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
break;
default:
dev_warn(denali->dev,
- "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
+ "Unknown Hynix NAND (Device ID: 0x%x).\n"
"Will use default parameter values instead.\n",
device_id);
}
@@ -474,33 +467,6 @@ static void detect_max_banks(struct denali_nand_info *denali)
denali->max_banks = 1 << (features & FEATURES__N_BANKS);
}
-static void detect_partition_feature(struct denali_nand_info *denali)
-{
- /*
- * For MRST platform, denali->fwblks represent the
- * number of blocks firmware is taken,
- * FW is in protect partition and MTD driver has no
- * permission to access it. So let driver know how many
- * blocks it can't touch.
- */
- if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
- PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
- denali->fwblks =
- ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
- MIN_MAX_BANK__MIN_VALUE) *
- denali->blksperchip)
- +
- (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
- MIN_BLK_ADDR__VALUE);
- } else {
- denali->fwblks = SPECTRA_START_BLOCK;
- }
- } else {
- denali->fwblks = SPECTRA_START_BLOCK;
- }
-}
-
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
uint16_t status = PASS;
@@ -508,9 +474,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint8_t maf_id, device_id;
int i;
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
/*
* Use read id method to get device ID and other params.
* For some NAND chips, controller can't report the correct
@@ -552,8 +515,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
find_valid_banks(denali);
- detect_partition_feature(denali);
-
/*
* If the user specified to override the default timings
* with a specific ONFI mode, we apply those changes here.
@@ -567,9 +528,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
if (INT_ENABLE)
iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
else
@@ -605,7 +563,6 @@ static void denali_irq_init(struct denali_nand_info *denali)
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
denali_set_intr_modes(denali, false);
- free_irq(irqnum, denali);
}
static void denali_irq_enable(struct denali_nand_info *denali,
@@ -1437,9 +1394,6 @@ static struct nand_bbt_descr bbt_mirror_descr = {
/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
- denali->idx = 0;
-
- /* setup interrupt handler */
/*
* the completion object will be used to notify
* the callee that the interrupt is done
@@ -1485,14 +1439,12 @@ int denali_init(struct denali_nand_info *denali)
denali_hw_init(denali);
denali_drv_init(denali);
- /*
- * denali_isr register is done after all the hardware
- * initilization is finished
- */
- if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
- DENALI_NAND_NAME, denali)) {
- pr_err("Spectra: Unable to allocate IRQ\n");
- return -ENODEV;
+ /* Request IRQ after all the hardware initialization is finished */
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
}
/* now that our ISR is registered, we can enable interrupts */
@@ -1510,10 +1462,9 @@ int denali_init(struct denali_nand_info *denali)
* this is the first stage in a two step process to register
* with the nand subsystem
*/
- if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
- ret = -ENXIO;
+ ret = nand_scan_ident(mtd, denali->max_banks, NULL);
+ if (ret)
goto failed_req_irq;
- }
/* allocate the right size buffer now */
devm_kfree(denali->dev, denali->buf.buf);
@@ -1528,7 +1479,7 @@ int denali_init(struct denali_nand_info *denali)
/* Is 32-bit DMA supported? */
ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
if (ret) {
- pr_err("Spectra: no usable DMA configuration\n");
+ dev_err(denali->dev, "No usable DMA configuration\n");
goto failed_req_irq;
}
@@ -1536,7 +1487,7 @@ int denali_init(struct denali_nand_info *denali)
mtd->writesize + mtd->oobsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
- dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ dev_err(denali->dev, "Failed to map DMA buffer\n");
ret = -EIO;
goto failed_req_irq;
}
@@ -1547,16 +1498,16 @@ int denali_init(struct denali_nand_info *denali)
* the real pagesize and anything necessary
*/
denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
- denali->nand.chipsize <<= (denali->devnum - 1);
- denali->nand.page_shift += (denali->devnum - 1);
+ denali->nand.chipsize <<= denali->devnum - 1;
+ denali->nand.page_shift += denali->devnum - 1;
denali->nand.pagemask = (denali->nand.chipsize >>
denali->nand.page_shift) - 1;
- denali->nand.bbt_erase_shift += (denali->devnum - 1);
+ denali->nand.bbt_erase_shift += denali->devnum - 1;
denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
- denali->nand.chip_shift += (denali->devnum - 1);
- mtd->writesize <<= (denali->devnum - 1);
- mtd->oobsize <<= (denali->devnum - 1);
- mtd->erasesize <<= (denali->devnum - 1);
+ denali->nand.chip_shift += denali->devnum - 1;
+ mtd->writesize <<= denali->devnum - 1;
+ mtd->oobsize <<= denali->devnum - 1;
+ mtd->erasesize <<= denali->devnum - 1;
mtd->size = denali->nand.numchips * denali->nand.chipsize;
denali->bbtskipbytes *= denali->devnum;
@@ -1606,14 +1557,6 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.ecc.bytes *= denali->devnum;
denali->nand.ecc.strength *= denali->devnum;
- /*
- * Let driver know the total blocks number and how many blocks
- * contained by each nand chip. blksperchip will help driver to
- * know how many blocks is taken by FW.
- */
- denali->totalblks = mtd->size >> denali->nand.phys_erase_shift;
- denali->blksperchip = denali->totalblks / denali->nand.numchips;
-
/* override the default read operations */
denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
denali->nand.ecc.read_page = denali_read_page;
@@ -1624,15 +1567,13 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.ecc.write_oob = denali_write_oob;
denali->nand.erase = denali_erase;
- if (nand_scan_tail(mtd)) {
- ret = -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
goto failed_req_irq;
- }
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
- dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
- ret);
+ dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
goto failed_req_irq;
}
return 0;
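
The denali.c conversion above moves to devm_request_irq(), so the IRQ is released automatically when the device goes away and the explicit free_irq() in denali_irq_cleanup() can be dropped. A short sketch of the managed-IRQ pattern (handler and names are illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int example_setup_irq(struct device *dev, int irq, void *drvdata)
{
	int ret;

	ret = devm_request_irq(dev, irq, example_isr, IRQF_SHARED,
			       "example", drvdata);
	if (ret)
		dev_err(dev, "Unable to request IRQ\n");
	return ret;	/* no free_irq() needed on the remove path */
}
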
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index e7ab4866a5da..ea22191e8515 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -383,14 +383,6 @@
#define CLK_X 5
#define CLK_MULTI 4
-/* spectraswconfig.h */
-#define CMD_DMA 0
-
-#define SPECTRA_PARTITION_ID 0
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK 3
-#define NUM_FREE_BLOCKS_GATE 30
-
/* KBV - Updated to LNW scratch register address */
#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
#define SCRATCH_REG_SIZE 64
@@ -467,13 +459,9 @@ struct denali_nand_info {
spinlock_t irq_lock;
uint32_t irq_status;
int irq_debug_array[32];
- int idx;
int irq;
uint32_t devnum; /* represent how many nands connected */
- uint32_t fwblks; /* represent how many blocks FW used */
- uint32_t totalblks;
- uint32_t blksperchip;
uint32_t bbtskipbytes;
uint32_t max_banks;
};
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 0cb1e8d9fbfc..5607fcd3b8ed 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/slab.h>
#include "denali.h"
@@ -110,7 +109,7 @@ static int denali_dt_remove(struct platform_device *ofdev)
struct denali_dt *dt = platform_get_drvdata(ofdev);
denali_remove(&dt->denali);
- clk_disable(dt->clk);
+ clk_disable_unprepare(dt->clk);
return 0;
}
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index de31514df282..ac843238b77e 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/slab.h>
#include "denali.h"
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index d4f454a4b35e..4924b43977ef 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -926,8 +926,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/*
* Scan to find existence of the device
*/
- if (nand_scan_ident(mtd, 1, NULL)) {
- ret = -ENXIO;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret) {
dev_err(&pdev->dev, "No NAND Device found!\n");
goto err_scan_ident;
}
@@ -992,10 +992,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
/* Second stage of scan to fill MTD data-structures */
- if (nand_scan_tail(mtd)) {
- ret = -ENXIO;
+ ret = nand_scan_tail(mtd);
+ if (ret)
goto err_probe;
- }
/*
* The partition information can be accessed by (in the same precedence)
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 6317f6836022..0d24857469ab 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -286,10 +286,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
- if (nand_scan(mtd, 1)) {
- ret = -ENXIO;
+ ret = nand_scan(mtd, 1);
+ if (ret)
goto err_wp;
- }
if (gpiomtd->plat.adjust_parts)
gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 9432546f4cd4..e40364eeb556 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -774,10 +774,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
}
ret = nand_scan_ident(mtd, max_chips, NULL);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto err_res;
- }
host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
&host->dma_buffer, GFP_KERNEL);
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 852388171f20..846a66c1b133 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -747,10 +747,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device and
* Get the type of NAND device SMALL block or LARGE block
*/
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_exit3;
- }
host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf) {
@@ -776,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
init_completion(&host->comp_controller);
host->irq = platform_get_irq(pdev, 0);
- if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ if (host->irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto err_exit3;
@@ -793,10 +792,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Fills out all the uninitialized function pointers with the defaults
* And scans for a bad block table if appropriate.
*/
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_exit4;
- }
mtd->name = DRV_NAME;
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 8d3edc34958e..53bafe23ab39 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -894,10 +894,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan_ident(mtd, 1, NULL);
+ if (res)
goto err_exit3;
- }
/* OOB and ECC CPU and DMA work areas */
host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -929,10 +928,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/*
* Fills out all the uninitialized function pointers with the defaults
*/
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
+ res = nand_scan_tail(mtd);
+ if (res)
goto err_exit3;
- }
mtd->name = "nxp_lpc3220_slc";
res = mtd_device_register(mtd, host->ncfg->parts,
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 7eacb2f545f5..6d6eaed2d20c 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -777,9 +777,9 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Detect NAND chips */
- if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+ retval = nand_scan(mtd, be32_to_cpup(chips_no));
+ if (retval) {
dev_err(dev, "NAND Flash not found !\n");
- retval = -ENXIO;
goto error;
}
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 5223a2182ee4..6c3eed3c2094 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -1297,7 +1297,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
ret = nand_scan_ident(mtd, nsels, NULL);
if (ret)
- return -ENODEV;
+ return ret;
/* store bbt magic in page, because OOB is not protected */
if (nand->bbt_options & NAND_BBT_USE_FLASH)
@@ -1323,7 +1323,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
ret = nand_scan_tail(mtd);
if (ret)
- return -ENODEV;
+ return ret;
ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
if (ret) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index d7f724b24fd7..61ca020c5272 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1747,10 +1747,9 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL);
+ if (err)
goto escan;
- }
switch (this->ecc.mode) {
case NAND_ECC_HW:
@@ -1808,10 +1807,9 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto escan;
- }
/* Register the partitions */
mtd_device_parse_register(mtd, part_probes,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a3f7bf..ec1c28aaaf23 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -709,6 +709,25 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
nand_wait_ready(mtd);
}
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
+ ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
* @mtd: MTD device structure
@@ -773,10 +792,13 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
- case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
return;
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
case NAND_CMD_RESET:
if (chip->dev_ready)
break;
@@ -795,6 +817,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
return;
case NAND_CMD_READ0:
@@ -1946,7 +1970,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
__func__, buf);
read_retry:
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
/*
* Now read the page into the buffer. Absent an error,
@@ -2634,7 +2659,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
@@ -2657,7 +2683,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!cached || !NAND_HAS_CACHEPROG(chip)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
/*
* See if operation failed and additional status checks are
@@ -3985,10 +4012,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
-static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
- struct nand_chip *chip,
- int *maf_id, int *dev_id,
- struct nand_flash_dev *type)
+static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
{
int busw;
int i, maf_idx;
@@ -4026,7 +4052,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
*maf_id, *dev_id, id_data[0], id_data[1]);
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
if (!type)
@@ -4053,7 +4079,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
}
if (!type->name)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (!mtd->name)
mtd->name = type->name;
@@ -4098,7 +4124,7 @@ ident_done:
pr_warn("bus width %d instead %d bit\n",
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
busw ? 16 : 8);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
nand_decode_bbm_options(mtd, chip, id_data);
@@ -4140,7 +4166,7 @@ ident_done:
pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
- return type;
+ return 0;
}
static const char * const nand_ecc_modes[] = {
@@ -4306,7 +4332,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
{
int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_flash_dev *type;
int ret;
ret = nand_dt_init(chip);
@@ -4329,14 +4354,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, &nand_maf_id,
- &nand_dev_id, table);
-
- if (IS_ERR(type)) {
+ ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table);
+ if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
chip->select_chip(mtd, -1);
- return PTR_ERR(type);
+ return ret;
}
/* Initialize the ->data_interface field. */
@@ -4515,6 +4538,26 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
+static bool invalid_ecc_page_accessors(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (nand_standard_page_accessors(ecc))
+ return false;
+
+ /*
+ * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
+ * controller driver implements all the page accessors because
+ * default helpers are not suitable when the core does not
+ * send the READ0/PAGEPROG commands.
+ */
+ return (!ecc->read_page || !ecc->write_page ||
+ !ecc->read_page_raw || !ecc->write_page_raw ||
+ (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
+ (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
+ ecc->hwctl && ecc->calculate));
+}
+
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4535,6 +4578,11 @@ int nand_scan_tail(struct mtd_info *mtd)
!(chip->bbt_options & NAND_BBT_USE_FLASH)))
return -EINVAL;
+ if (invalid_ecc_page_accessors(chip)) {
+ pr_err("Invalid ECC page accessors setup\n");
+ return -EINVAL;
+ }
+
if (!(chip->options & NAND_OWN_BUFFERS)) {
nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+ mtd->oobsize * 3, GFP_KERNEL);
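
nand_ccs_delay() above works in the units the core uses for SDR timings, picoseconds, while ndelay() takes nanoseconds, hence the division by 1000 and the 500ns fallback when tCCS_min is not known. A tiny sketch of the same conversion (function name is illustrative):

#include <linux/delay.h>
#include <linux/types.h>

static void example_ccs_delay(u32 tccs_min_ps)
{
	if (tccs_min_ps)
		ndelay(tccs_min_ps / 1000);	/* ps -> ns */
	else
		ndelay(500);	/* conservative default, as in the hunk above */
}
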
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2af9869a115e..b3a332f37e14 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -36,6 +36,9 @@ struct nand_flash_dev nand_flash_ids[] = {
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
{"TC58NVG3S0F 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 13a587407be3..f06312df3669 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -18,6 +18,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 20000,
.tALS_min = 50000,
@@ -58,6 +60,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 25000,
@@ -98,6 +102,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 15000,
@@ -138,6 +144,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -178,6 +186,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -218,6 +228,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
{
.type = NAND_SDR_IFACE,
.timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
@@ -290,10 +302,22 @@ int onfi_init_data_interface(struct nand_chip *chip,
*iface = onfi_sdr_timings[timing_mode];
/*
- * TODO: initialize timings that cannot be deduced from timing mode:
+ * Initialize timings that cannot be deduced from timing mode:
* tR, tPROG, tCCS, ...
* This information is part of the ONFI parameter page.
*/
+ if (chip->onfi_version) {
+ struct nand_onfi_params *params = &chip->onfi_params;
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
+ timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
+ timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
+ }
return 0;
}
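
The onfi_init_data_interface() change converts the ONFI parameter-page timings into the picosecond fields of nand_sdr_timings: tPROG/tBERS/tR are reported in microseconds (so x1,000,000) and tCCS in nanoseconds (so x1,000). A quick unit check with example values (not taken from a real parameter page):

#include <linux/types.h>

static void example_onfi_units(void)
{
	u16 t_prog_us = 600;	/* e.g. 600 us from the parameter page */
	u16 t_ccs_ns = 500;	/* e.g. 500 ns */

	u64 tPROG_max_ps = 1000000ULL * t_prog_us;	/* 600,000,000 ps = 600 us */
	u32 tCCS_min_ps = 1000U * t_ccs_ns;		/* 500,000 ps = 500 ns */

	(void)tPROG_max_ps;
	(void)tCCS_min_ps;
}
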
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1eb934414eb5..c84742671a5f 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -525,24 +525,20 @@ static int nandsim_debugfs_create(struct nandsim *dev)
{
struct nandsim_debug_info *dbg = &dev->dbg;
struct dentry *dent;
- int err;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
dent = debugfs_create_dir("nandsim", NULL);
- if (IS_ERR_OR_NULL(dent)) {
- int err = dent ? -ENODEV : PTR_ERR(dent);
-
- NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
- err);
- return err;
+ if (!dent) {
+ NS_ERR("cannot create \"nandsim\" debugfs directory\n");
+ return -ENODEV;
}
dbg->dfs_root = dent;
dent = debugfs_create_file("wear_report", S_IRUSR,
dbg->dfs_root, dev, &dfs_fops);
- if (IS_ERR_OR_NULL(dent))
+ if (!dent)
goto out_remove;
dbg->dfs_wear_report = dent;
@@ -550,8 +546,7 @@ static int nandsim_debugfs_create(struct nandsim *dev)
out_remove:
debugfs_remove_recursive(dbg->dfs_root);
- err = dent ? PTR_ERR(dent) : -ENODEV;
- return err;
+ return -ENODEV;
}
/**
@@ -2313,8 +2308,6 @@ static int __init ns_init_module(void)
retval = nand_scan_ident(nsmtd, 1, NULL);
if (retval) {
NS_ERR("cannot scan NAND Simulator device\n");
- if (retval > 0)
- retval = -ENXIO;
goto error;
}
@@ -2350,8 +2343,6 @@ static int __init ns_init_module(void)
retval = nand_scan_tail(nsmtd);
if (retval) {
NS_ERR("can't register NAND Simulator\n");
- if (retval > 0)
- retval = -ENXIO;
goto error;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9cdc9..2a52101120d4 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1895,10 +1895,10 @@ static int omap_nand_probe(struct platform_device *pdev)
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- if (nand_scan_ident(mtd, 1, NULL)) {
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err) {
dev_err(&info->pdev->dev,
"scan failed, may be bus-width mismatch\n");
- err = -ENXIO;
goto return_error;
}
@@ -2154,10 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
scan_tail:
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto return_error;
- }
if (dev->of_node)
mtd_device_register(mtd, NULL, 0);
@@ -2197,6 +2196,7 @@ static const struct of_device_id omap_nand_ids[] = {
{ .compatible = "ti,omap2-nand", },
{},
};
+MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
.probe = omap_nand_probe,
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a2cf0d..4a91c5d000be 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -155,10 +155,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
clk_put(clk);
}
- if (nand_scan(mtd, 1)) {
- ret = -ENXIO;
+ ret = nand_scan(mtd, 1);
+ if (ret)
goto no_dev;
- }
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c
new file mode 100644
index 000000000000..3e3bf3b364d2
--- /dev/null
+++ b/drivers/mtd/nand/oxnas_nand.c
@@ -0,0 +1,195 @@
+/*
+ * Oxford Semiconductor OXNAS NAND driver
+
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Heavily based on plat_nand.c :
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Nand commands */
+#define OXNAS_NAND_CMD_ALE BIT(18)
+#define OXNAS_NAND_CMD_CLE BIT(19)
+
+#define OXNAS_NAND_MAX_CHIPS 1
+
+struct oxnas_nand_ctrl {
+ struct nand_hw_control base;
+ void __iomem *io_base;
+ struct clk *clk;
+ struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+};
+
+static uint8_t oxnas_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ return readb(oxnas->io_base);
+}
+
+static void oxnas_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ ioread8_rep(oxnas->io_base, buf, len);
+}
+
+static void oxnas_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ iowrite8_rep(oxnas->io_base, buf, len);
+}
+
+/* Single CS command control */
+static void oxnas_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int oxnas_nand_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *nand_np;
+ struct oxnas_nand_ctrl *oxnas;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int nchips = 0;
+ int count = 0;
+ int err = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ oxnas = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!oxnas)
+ return -ENOMEM;
+
+ nand_hw_control_init(&oxnas->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oxnas->io_base))
+ return PTR_ERR(oxnas->io_base);
+
+ oxnas->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oxnas->clk))
+ oxnas->clk = NULL;
+
+ /* Only a single chip node is supported */
+ count = of_get_child_count(np);
+ if (count > 1)
+ return -EINVAL;
+
+ err = clk_prepare_enable(oxnas->clk);
+ if (err)
+ return err;
+
+ device_reset_optional(&pdev->dev);
+
+ for_each_child_of_node(np, nand_np) {
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->controller = &oxnas->base;
+
+ nand_set_flash_node(chip, nand_np);
+ nand_set_controller_data(chip, oxnas);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = &pdev->dev;
+ mtd->priv = chip;
+
+ chip->cmd_ctrl = oxnas_nand_cmd_ctrl;
+ chip->read_buf = oxnas_nand_read_buf;
+ chip->read_byte = oxnas_nand_read_byte;
+ chip->write_buf = oxnas_nand_write_buf;
+ chip->chip_delay = 30;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(mtd, 1);
+ if (err)
+ return err;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err) {
+ nand_release(mtd);
+ return err;
+ }
+
+ oxnas->chips[nchips] = chip;
+ ++nchips;
+ }
+
+ /* Exit if no chips found */
+ if (!nchips)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, oxnas);
+
+ return 0;
+}
+
+static int oxnas_nand_remove(struct platform_device *pdev)
+{
+ struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+
+ if (oxnas->chips[0])
+ nand_release(nand_to_mtd(oxnas->chips[0]));
+
+ clk_disable_unprepare(oxnas->clk);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_nand_match[] = {
+ { .compatible = "oxsemi,ox820-nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, oxnas_nand_match);
+
+static struct platform_driver oxnas_nand_driver = {
+ .probe = oxnas_nand_probe,
+ .remove = oxnas_nand_remove,
+ .driver = {
+ .name = "oxnas_nand",
+ .of_match_table = oxnas_nand_match,
+ },
+};
+
+module_platform_driver(oxnas_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxnas NAND driver");
+MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5de7591b0510..074b8b01289e 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -156,10 +156,9 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
- if (nand_scan(pasemi_nand_mtd, 1)) {
- err = -ENXIO;
+ err = nand_scan(pasemi_nand_mtd, 1);
+ if (err)
goto out_lpc;
- }
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
dev_err(dev, "Unable to register MTD device\n");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 415a53a0deeb..791de3e4bbb6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -86,10 +86,9 @@ static int plat_nand_probe(struct platform_device *pdev)
}
/* Scan to find existence of the device */
- if (nand_scan(mtd, pdata->chip.nr_chips)) {
- err = -ENXIO;
+ err = nand_scan(mtd, pdata->chip.nr_chips);
+ if (err)
goto out;
- }
part_types = pdata->chip.part_probe_types;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index b121bf4ed73a..649ba8200832 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1680,8 +1680,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->ecc.strength = pdata->ecc_strength;
chip->ecc.size = pdata->ecc_step_size;
- if (nand_scan_ident(mtd, 1, NULL))
- return -ENODEV;
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
if (!pdata->keep_config) {
ret = pxa3xx_nand_init(host);
@@ -1774,8 +1775,11 @@ static int alloc_nand_resource(struct platform_device *pdev)
int ret, irq, cs;
pdata = dev_get_platdata(&pdev->dev);
- if (pdata->num_cs <= 0)
+ if (pdata->num_cs <= 0) {
+ dev_err(&pdev->dev, "invalid number of chip selects\n");
return -ENODEV;
+ }
+
info = devm_kzalloc(&pdev->dev,
sizeof(*info) + sizeof(*host) * pdata->num_cs,
GFP_KERNEL);
@@ -1813,8 +1817,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
nand_hw_control_init(chip->controller);
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get nand clock\n");
- return PTR_ERR(info->clk);
+ ret = PTR_ERR(info->clk);
+ dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
+ return ret;
}
ret = clk_prepare_enable(info->clk);
if (ret < 0)
@@ -1842,6 +1847,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(info->mmio_base)) {
ret = PTR_ERR(info->mmio_base);
+ dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
goto fail_disable_clk;
}
info->mmio_phys = r->start;
@@ -1861,7 +1867,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
pxa3xx_nand_irq_thread, IRQF_ONESHOT,
pdev->name, info);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
goto fail_free_buf;
}
@@ -1960,10 +1966,8 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
ret = alloc_nand_resource(pdev);
- if (ret) {
- dev_err(&pdev->dev, "alloc nand resource failed\n");
+ if (ret)
return ret;
- }
info = platform_get_drvdata(pdev);
probe_success = 0;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d459c19d78de..f0b030d44f71 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -39,6 +39,8 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -185,6 +187,22 @@ struct s3c2410_nand_info {
#endif
};
+struct s3c24XX_nand_devtype_data {
+ enum s3c_cpu_type type;
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
+ .type = TYPE_S3C2410,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
+ .type = TYPE_S3C2412,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
+ .type = TYPE_S3C2440,
+};
+
/* conversion functions */
static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
@@ -497,7 +515,6 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
/* ECC handling functions */
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
@@ -649,7 +666,6 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
-#endif
/* over-ride the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
@@ -796,6 +812,30 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
return -ENODEV;
}
+static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_platform_nand *pdata = info->platform;
+ const struct nand_sdr_timings *timings;
+ int tacls;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
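+ /* SDR timings are expressed in ps; the platform data fields expect ns */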
+ tacls = timings->tCLS_min - timings->tWP_min;
+ if (tacls < 0)
+ tacls = 0;
+
+ pdata->tacls = DIV_ROUND_UP(tacls, 1000);
+ pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
+ pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
+
+ return s3c2410_nand_setrate(info);
+}
+
/**
* s3c2410_nand_init_chip - initialise a single instance of a chip
* @info: The base NAND controller the chip is on.
@@ -810,9 +850,12 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
{
+ struct device_node *np = info->device->of_node;
struct nand_chip *chip = &nmtd->chip;
void __iomem *regs = info->regs;
+ nand_set_flash_node(chip, set->of_node);
+
chip->write_buf = s3c2410_nand_write_buf;
chip->read_buf = s3c2410_nand_read_buf;
chip->select_chip = s3c2410_nand_select_chip;
@@ -821,6 +864,13 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->options = set->options;
chip->controller = &info->controller;
+ /*
+ * let's keep behavior unchanged for legacy boards booting via pdata and
+ * auto-detect timings only when booting with a device tree.
+ */
+ if (np)
+ chip->setup_data_interface = s3c2410_nand_setup_data_interface;
+
switch (info->cpu_type) {
case TYPE_S3C2410:
chip->IO_ADDR_W = regs + S3C2410_NFDATA;
@@ -858,58 +908,14 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->info = info;
nmtd->set = set;
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.strength = 1;
+ chip->ecc.mode = info->platform->ecc_mode;
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
- }
-#else
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
-#endif
-
- if (set->disable_ecc)
- chip->ecc.mode = NAND_ECC_NONE;
-
- switch (chip->ecc.mode) {
- case NAND_ECC_NONE:
- dev_info(info->device, "NAND ECC disabled\n");
- break;
- case NAND_ECC_SOFT:
- dev_info(info->device, "NAND soft ECC\n");
- break;
- case NAND_ECC_HW:
- dev_info(info->device, "NAND hardware ECC\n");
- break;
- default:
- dev_info(info->device, "NAND ECC UNKNOWN\n");
- break;
- }
-
- /* If you use u-boot BBT creation code, specifying this flag will
- * let the kernel fish out the BBT from the NAND, and also skip the
- * full NAND scan that can take 1/2s or so. Little things... */
- if (set->flash_bbt) {
+ /*
+ * If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND.
+ */
+ if (set->flash_bbt)
chip->bbt_options |= NAND_BBT_USE_FLASH;
- chip->options |= NAND_SKIP_BBTSCAN;
- }
}
/**
@@ -923,28 +929,146 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
*
* The internal state is currently limited to the ECC state information.
*/
-static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd)
+static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd)
{
struct nand_chip *chip = &nmtd->chip;
- dev_dbg(info->device, "chip %p => page shift %d\n",
- chip, chip->page_shift);
+ switch (chip->ecc.mode) {
- if (chip->ecc.mode != NAND_ECC_HW)
- return;
+ case NAND_ECC_NONE:
+ dev_info(info->device, "ECC disabled\n");
+ break;
+
+ case NAND_ECC_SOFT:
+ /*
+ * This driver expects Hamming based ECC when ecc_mode is set
+ * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+ * avoid adding an extra ecc_algo field to
+ * s3c2410_platform_nand.
+ */
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ dev_info(info->device, "soft ECC\n");
+ break;
+
+ case NAND_ECC_HW:
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+ }
+
+ dev_dbg(info->device, "chip %p => page shift %d\n",
+ chip, chip->page_shift);
/* change the behaviour depending on whether we are using
* the large or small page nand device */
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ mtd_set_ooblayout(nand_to_mtd(chip),
+ &s3c2410_ooblayout_ops);
+ }
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
+ dev_info(info->device, "hardware ECC\n");
+ break;
+
+ default:
+ dev_err(info->device, "invalid ECC mode!\n");
+ return -EINVAL;
}
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static const struct of_device_id s3c24xx_nand_dt_ids[] = {
+ {
+ .compatible = "samsung,s3c2410-nand",
+ .data = &s3c2410_nand_devtype_data,
+ }, {
+ /* also compatible with s3c6400 */
+ .compatible = "samsung,s3c2412-nand",
+ .data = &s3c2412_nand_devtype_data,
+ }, {
+ .compatible = "samsung,s3c2440-nand",
+ .data = &s3c2440_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
+
+static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
+{
+ const struct s3c24XX_nand_devtype_data *devtype_data;
+ struct s3c2410_platform_nand *pdata;
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct s3c2410_nand_set *sets;
+
+ devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!devtype_data)
+ return -ENODEV;
+
+ info->cpu_type = devtype_data->type;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdev->dev.platform_data = pdata;
+
+ pdata->nr_sets = of_get_child_count(np);
+ if (!pdata->nr_sets)
+ return 0;
+
+ sets = devm_kzalloc(&pdev->dev, sizeof(*sets) * pdata->nr_sets,
+ GFP_KERNEL);
+ if (!sets)
+ return -ENOMEM;
+
+ pdata->sets = sets;
+
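+ /* Describe one chip set per DT child node */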
+ for_each_available_child_of_node(np, child) {
+ sets->name = (char *)child->name;
+ sets->of_node = child;
+ sets->nr_chips = 1;
+
+ of_node_get(child);
+
+ sets++;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+
+ info->cpu_type = platform_get_device_id(pdev)->driver_data;
+
+ return 0;
}
/* s3c24xx_nand_probe
@@ -956,8 +1080,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
*/
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
- struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- enum s3c_cpu_type cpu_type;
+ struct s3c2410_platform_nand *plat;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -967,8 +1090,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
int nr_sets;
int setno;
- cpu_type = platform_get_device_id(pdev)->driver_data;
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -990,6 +1111,16 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+ if (pdev->dev.of_node)
+ err = s3c24xx_nand_probe_dt(pdev);
+ else
+ err = s3c24xx_nand_probe_pdata(pdev);
+
+ if (err)
+ goto exit_error;
+
+ plat = to_nand_plat(pdev);
+
/* allocate and map the resource */
/* currently we assume we have the one resource */
@@ -998,7 +1129,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info->device = &pdev->dev;
info->platform = plat;
- info->cpu_type = cpu_type;
info->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->regs)) {
@@ -1008,12 +1138,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
- /* initialise the hardware */
-
- err = s3c2410_nand_inithw(info);
- if (err != 0)
- goto exit_error;
-
sets = (plat != NULL) ? plat->sets : NULL;
nr_sets = (plat != NULL) ? plat->nr_sets : 1;
@@ -1046,7 +1170,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
NULL);
if (nmtd->scan_res == 0) {
- s3c2410_nand_update_chip(info, nmtd);
+ err = s3c2410_nand_update_chip(info, nmtd);
+ if (err < 0)
+ goto exit_error;
nand_scan_tail(mtd);
s3c2410_nand_add_partition(info, nmtd, sets);
}
@@ -1055,6 +1181,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
sets++;
}
+ /* initialise the hardware */
+ err = s3c2410_nand_inithw(info);
+ if (err != 0)
+ goto exit_error;
+
err = s3c2410_nand_cpufreq_register(info);
if (err < 0) {
dev_err(&pdev->dev, "failed to init cpufreq support\n");
@@ -1155,6 +1286,7 @@ static struct platform_driver s3c24xx_nand_driver = {
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c24xx-nand",
+ .of_match_table = s3c24xx_nand_dt_ids,
},
};
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 888fd314c62a..72369bd079af 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -187,17 +187,9 @@ static int socrates_nand_probe(struct platform_device *ofdev)
dev_set_drvdata(&ofdev->dev, host);
- /* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- res = -ENXIO;
+ res = nand_scan(mtd, 1);
+ if (res)
goto out;
- }
-
- /* second phase scan */
- if (nand_scan_tail(mtd)) {
- res = -ENXIO;
- goto out;
- }
res = mtd_device_register(mtd, NULL, 0);
if (!res)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 8b8470c4e6d0..e40482a65de6 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -145,6 +145,7 @@
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
+#define NFC_ECC_BLOCK_512 BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
#define NFC_ECC_MODE_MSK GENMASK(15, 12)
@@ -817,6 +818,9 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
NFC_ECC_PIPELINE;
+ if (nand->ecc.size == 512)
+ ecc_ctl |= NFC_ECC_BLOCK_512;
+
writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
new file mode 100644
index 000000000000..4a5e948c62df
--- /dev/null
+++ b/drivers/mtd/nand/tango_nand.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2016 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+/* Offsets relative to chip->base */
+#define PBUS_CMD 0
+#define PBUS_ADDR 4
+#define PBUS_DATA 8
+
+/* Offsets relative to reg_base */
+#define NFC_STATUS 0x00
+#define NFC_FLASH_CMD 0x04
+#define NFC_DEVICE_CFG 0x08
+#define NFC_TIMING1 0x0c
+#define NFC_TIMING2 0x10
+#define NFC_XFER_CFG 0x14
+#define NFC_PKT_0_CFG 0x18
+#define NFC_PKT_N_CFG 0x1c
+#define NFC_BB_CFG 0x20
+#define NFC_ADDR_PAGE 0x24
+#define NFC_ADDR_OFFSET 0x28
+#define NFC_XFER_STATUS 0x2c
+
+/* NFC_STATUS values */
+#define CMD_READY BIT(31)
+
+/* NFC_FLASH_CMD values */
+#define NFC_READ 1
+#define NFC_WRITE 2
+
+/* NFC_XFER_STATUS values */
+#define PAGE_IS_EMPTY BIT(16)
+
+/* Offsets relative to mem_base */
+#define METADATA 0x000
+#define ERROR_REPORT 0x1c0
+
+/*
+ * Error reports are split in two bytes:
+ * byte 0 for the first packet in the page (PKT_0)
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
+#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
+#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
+#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
+
+/* Offsets relative to pbus_base */
+#define PBUS_CS_CTRL 0x83c
+#define PBUS_PAD_MODE 0x8f0
+
+/* PBUS_CS_CTRL values */
+#define PBUS_IORDY BIT(31)
+
+/*
+ * PBUS_PAD_MODE values
+ * In raw mode, the driver communicates directly with the NAND chips.
+ * In NFC mode, the NAND Flash controller manages the communication.
+ * We use NFC mode for read and write; raw mode for everything else.
+ */
+#define MODE_RAW 0
+#define MODE_NFC BIT(31)
+
+#define METADATA_SIZE 4
+#define BBM_SIZE 6
+#define FIELD_ORDER 15
+
+#define MAX_CS 4
+
+struct tango_nfc {
+ struct nand_hw_control hw;
+ void __iomem *reg_base;
+ void __iomem *mem_base;
+ void __iomem *pbus_base;
+ struct tango_chip *chips[MAX_CS];
+ struct dma_chan *chan;
+ int freq_kHz;
+};
+
+#define to_tango_nfc(ptr) container_of(ptr, struct tango_nfc, hw)
+
+struct tango_chip {
+ struct nand_chip nand_chip;
+ void __iomem *base;
+ u32 timing1;
+ u32 timing2;
+ u32 xfer_cfg;
+ u32 pkt_0_cfg;
+ u32 pkt_n_cfg;
+ u32 bb_cfg;
+};
+
+#define to_tango_chip(ptr) container_of(ptr, struct tango_chip, nand_chip)
+
+#define XFER_CFG(cs, page_count, steps, metadata_size) \
+ ((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))
+
+#define PKT_CFG(size, strength) ((size) << 16 | (strength))
+
+#define BB_CFG(bb_offset, bb_size) ((bb_offset) << 16 | (bb_size))
+
+#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
+
+static void tango_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ if (ctrl & NAND_CLE)
+ writeb_relaxed(dat, tchip->base + PBUS_CMD);
+
+ if (ctrl & NAND_ALE)
+ writeb_relaxed(dat, tchip->base + PBUS_ADDR);
+}
+
+static int tango_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+ return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
+}
+
+static u8 tango_read_byte(struct mtd_info *mtd)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ return readb_relaxed(tchip->base + PBUS_DATA);
+}
+
+static void tango_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ ioread8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+ iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_select_chip(struct mtd_info *mtd, int idx)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+
+ if (idx < 0)
+ return; /* No "chip unselect" function */
+
+ writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
+ writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
+ writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
+ writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
+ writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
+ writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *meta = chip->oob_poi + BBM_SIZE;
+ u8 *ecc = chip->oob_poi + BBM_SIZE + METADATA_SIZE;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, meta_len, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; ++i) {
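+ /* The metadata bytes are covered by the first packet's ECC only */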
+ meta_len = i ? 0 : METADATA_SIZE;
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ meta, meta_len,
+ chip->ecc.strength);
+ if (res < 0)
+ mtd->ecc_stats.failed++;
+
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
+
+static int decode_error_report(struct tango_nfc *nfc)
+{
+ u32 status, res;
+
+ status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+ if (status & PAGE_IS_EMPTY)
+ return 0;
+
+ res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+ if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
+ return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+
+ return -EBADMSG;
+}
+
+static void tango_dma_callback(void *arg)
+{
+ complete(arg);
+}
+
+static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf,
+ int len, int page)
+{
+ void __iomem *addr = nfc->reg_base + NFC_STATUS;
+ struct dma_chan *chan = nfc->chan;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist sg;
+ struct completion tx_done;
+ int err = -EIO;
+ u32 res, val;
+
+ sg_init_one(&sg, buf, len);
+ if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
+ return -EIO;
+
+ desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT);
+ if (!desc)
+ goto dma_unmap;
+
+ desc->callback = tango_dma_callback;
+ desc->callback_param = &tx_done;
+ init_completion(&tx_done);
+
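+ /* Hand the pad bus over to the NAND Flash Controller for the transfer */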
+ writel_relaxed(MODE_NFC, nfc->pbus_base + PBUS_PAD_MODE);
+
+ writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
+ writel_relaxed(0, nfc->reg_base + NFC_ADDR_OFFSET);
+ writel_relaxed(cmd, nfc->reg_base + NFC_FLASH_CMD);
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ res = wait_for_completion_timeout(&tx_done, HZ);
+ if (res > 0)
+ err = readl_poll_timeout(addr, val, val & CMD_READY, 0, 1000);
+
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+dma_unmap:
+ dma_unmap_sg(chan->device->dev, &sg, 1, dir);
+
+ return err;
+}
+
+static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ int err, res, len = mtd->writesize;
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
+ if (err)
+ return err;
+
+ res = decode_error_report(nfc);
+ if (res < 0) {
+ chip->ecc.read_oob_raw(mtd, chip, page);
+ res = check_erased_page(chip, buf);
+ }
+
+ return res;
+}
+
+static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ int err, len = mtd->writesize;
+
+ /* Calling tango_write_oob() would send PAGEPROG twice */
+ if (oob_required)
+ return -ENOTSUPP;
+
+ writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
+ err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ } else {
+ tango_read_buf(mtd, *buf, len);
+ *buf += len;
+ }
+}
+
+static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ *pos += len;
+
+ if (!*buf) {
+ /* skip over "len" bytes */
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1);
+ } else {
+ tango_write_buf(mtd, *buf, len);
+ *buf += len;
+ }
+}
+
+/*
+ * Physical page layout (not drawn to scale)
+ *
+ * NB: Bad Block Marker area splits PKT_N in two (N1, N2).
+ *
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ * | M | PKT_0 | ECC_0 | ... | N1 | BBM | N2 | ECC_N |
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ *
+ * Logical page layout:
+ *
+ * +-----+---+-------+-----+-------+
+ * oob = | BBM | M | ECC_0 | ... | ECC_N |
+ * +-----+---+-------+-----+-------+
+ *
+ * +-----------------+-----+-----------------+
+ * buf = | PKT_0 | ... | PKT_N |
+ * +-----------------+-----+-----------------+
+ */
+static void raw_read(struct nand_chip *chip, u8 *buf, u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_read(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_read(chip, &buf, pkt_size, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_read(chip, &buf, rem, &pos);
+ aux_read(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_read(chip, &buf, pkt_size - rem, &pos);
+ aux_read(chip, &oob, ecc_size, &pos);
+}
+
+static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *oob_orig = oob;
+ const int page_size = mtd->writesize;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int pos = 0; /* position within physical page */
+ int rem = page_size; /* bytes remaining until BBM area */
+
+ if (oob)
+ oob += BBM_SIZE;
+
+ aux_write(chip, &oob, METADATA_SIZE, &pos);
+
+ while (rem > pkt_size) {
+ aux_write(chip, &buf, pkt_size, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+ rem = page_size - pos;
+ }
+
+ aux_write(chip, &buf, rem, &pos);
+ aux_write(chip, &oob_orig, BBM_SIZE, &pos);
+ aux_write(chip, &buf, pkt_size - rem, &pos);
+ aux_write(chip, &oob, ecc_size, &pos);
+}
+
+static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ raw_read(chip, buf, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ raw_write(chip, buf, chip->oob_poi);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ return 0;
+}
+
+static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ raw_read(chip, NULL, chip->oob_poi);
+ return 0;
+}
+
+static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ raw_write(chip, NULL, chip->oob_poi);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ chip->waitfunc(mtd, chip);
+ return 0;
+}
+
+static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (idx >= ecc->steps)
+ return -ERANGE;
+
+ res->offset = BBM_SIZE + METADATA_SIZE + ecc->bytes * idx;
+ res->length = ecc->bytes;
+
+ return 0;
+}
+
+static int oob_free(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+ return -ERANGE; /* no free space in spare area */
+}
+
+static const struct mtd_ooblayout_ops tango_nand_ooblayout_ops = {
+ .ecc = oob_ecc,
+ .free = oob_free,
+};
+
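+/*
+ * Convert a duration in ps into controller clock ticks, rounding up:
+ * ticks = (freq_kHz * 1000) * (t_ps * 1e-12) = freq_kHz * t_ps / 1e9,
+ * hence the division by NSEC_PER_SEC used as a plain 1e9 constant.
+ */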
+static u32 to_ticks(int kHz, int ps)
+{
+ return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
+}
+
+static int tango_set_timings(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
+{
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+ struct tango_chip *tchip = to_tango_chip(chip);
+ u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
+ int kHz = nfc->freq_kHz;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (check_only)
+ return 0;
+
+ Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
+ Textw = to_ticks(kHz, sdr->tWB_max);
+ Twc = to_ticks(kHz, sdr->tWC_min);
+ Twpw = to_ticks(kHz, sdr->tWC_min - sdr->tWP_min);
+
+ Tacc = to_ticks(kHz, sdr->tREA_max);
+ Thold = to_ticks(kHz, sdr->tREH_min);
+ Trpw = to_ticks(kHz, sdr->tRC_min - sdr->tREH_min);
+ Textr = to_ticks(kHz, sdr->tRHZ_max);
+
+ tchip->timing1 = TIMING(Trdy, Textw, Twc, Twpw);
+ tchip->timing2 = TIMING(Tacc, Thold, Trpw, Textr);
+
+ return 0;
+}
+
+static int chip_init(struct device *dev, struct device_node *np)
+{
+ u32 cs;
+ int err, res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct tango_chip *tchip;
+ struct nand_ecc_ctrl *ecc;
+ struct tango_nfc *nfc = dev_get_drvdata(dev);
+
+ tchip = devm_kzalloc(dev, sizeof(*tchip), GFP_KERNEL);
+ if (!tchip)
+ return -ENOMEM;
+
+ res = of_property_count_u32_elems(np, "reg");
+ if (res < 0)
+ return res;
+
+ if (res != 1)
+ return -ENOTSUPP; /* Multi-CS chips are not supported */
+
+ err = of_property_read_u32_index(np, "reg", 0, &cs);
+ if (err)
+ return err;
+
+ if (cs >= MAX_CS)
+ return -EINVAL;
+
+ chip = &tchip->nand_chip;
+ ecc = &chip->ecc;
+ mtd = nand_to_mtd(chip);
+
+ chip->read_byte = tango_read_byte;
+ chip->write_buf = tango_write_buf;
+ chip->read_buf = tango_read_buf;
+ chip->select_chip = tango_select_chip;
+ chip->cmd_ctrl = tango_cmd_ctrl;
+ chip->dev_ready = tango_dev_ready;
+ chip->setup_data_interface = tango_set_timings;
+ chip->options = NAND_USE_BOUNCE_BUFFER |
+ NAND_NO_SUBPAGE_WRITE |
+ NAND_WAIT_TCCS;
+ chip->controller = &nfc->hw;
+ tchip->base = nfc->pbus_base + (cs * 256);
+
+ nand_set_flash_node(chip, np);
+ mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
+ mtd->dev.parent = dev;
+
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err)
+ return err;
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->algo = NAND_ECC_BCH;
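+ /* Each bit of BCH correction costs FIELD_ORDER parity bits */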
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+ ecc->read_page_raw = tango_read_page_raw;
+ ecc->write_page_raw = tango_write_page_raw;
+ ecc->read_page = tango_read_page;
+ ecc->write_page = tango_write_page;
+ ecc->read_oob = tango_read_oob;
+ ecc->write_oob = tango_write_oob;
+ ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
+
+ err = nand_scan_tail(mtd);
+ if (err)
+ return err;
+
+ tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE);
+ tchip->pkt_0_cfg = PKT_CFG(ecc->size + METADATA_SIZE, ecc->strength);
+ tchip->pkt_n_cfg = PKT_CFG(ecc->size, ecc->strength);
+ tchip->bb_cfg = BB_CFG(mtd->writesize, BBM_SIZE);
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ return err;
+
+ nfc->chips[cs] = tchip;
+
+ return 0;
+}
+
+static int tango_nand_remove(struct platform_device *pdev)
+{
+ int cs;
+ struct tango_nfc *nfc = platform_get_drvdata(pdev);
+
+ dma_release_channel(nfc->chan);
+
+ for (cs = 0; cs < MAX_CS; ++cs) {
+ if (nfc->chips[cs])
+ nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip));
+ }
+
+ return 0;
+}
+
+static int tango_nand_probe(struct platform_device *pdev)
+{
+ int err;
+ struct clk *clk;
+ struct resource *res;
+ struct tango_nfc *nfc;
+ struct device_node *np;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ nfc->mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->mem_base))
+ return PTR_ERR(nfc->mem_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ nfc->pbus_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->pbus_base))
+ return PTR_ERR(nfc->pbus_base);
+
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
+ if (IS_ERR(nfc->chan))
+ return PTR_ERR(nfc->chan);
+
+ platform_set_drvdata(pdev, nfc);
+ nand_hw_control_init(&nfc->hw);
+ nfc->freq_kHz = clk_get_rate(clk) / 1000;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ err = chip_init(&pdev->dev, np);
+ if (err) {
+ tango_nand_remove(pdev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id tango_nand_ids[] = {
+ { .compatible = "sigma,smp8758-nand" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver tango_nand_driver = {
+ .probe = tango_nand_probe,
+ .remove = tango_nand_remove,
+ .driver = {
+ .name = "tango-nand",
+ .of_match_table = tango_nand_ids,
+ },
+};
+
+module_platform_driver(tango_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango4 NAND Flash controller driver");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 08b30549ec0a..fc5e773f8b60 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -435,10 +435,10 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- retval = -ENODEV;
+ retval = nand_scan(mtd, 1);
+ if (retval)
goto err_irq;
- }
+
/* Register the partitions */
retval = mtd_device_parse_register(mtd, NULL, NULL,
data ? data->partition : NULL,
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 3ad514c44dcb..3ea4bb19e12d 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -717,10 +717,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
- err = -ENXIO;
+ err = nand_scan_ident(mtd, 1, NULL);
+ if (err)
goto error;
- }
vf610_nfc_init_controller(nfc);
@@ -775,10 +774,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
}
/* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
+ err = nand_scan_tail(mtd);
+ if (err)
goto error;
- }
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c0c458..895101a5e686 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
{ .compatible = "lantiq,nand-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, xway_nand_match);
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
},
};
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_nand_driver);
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 46f27de018c3..e21161353e76 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d403ba7b8f43..d489fbd07c12 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
- if (of_property_read_u32(np, "reg", &cs)) {
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret) {
dev_err(dev, "Couldn't determine chip select.\n");
goto err;
}
- if (cs > CQSPI_MAX_CHIPSELECT) {
+ if (cs >= CQSPI_MAX_CHIPSELECT) {
+ ret = -EINVAL;
dev_err(dev, "Chip select %d out of range.\n", cs);
goto err;
}
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 5c82e4ef1904..b4d8953fb30a 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -224,7 +224,7 @@ struct fsl_qspi_devtype_data {
int driver_data;
};
-static struct fsl_qspi_devtype_data vybrid_data = {
+static const struct fsl_qspi_devtype_data vybrid_data = {
.devtype = FSL_QUADSPI_VYBRID,
.rxfifo = 128,
.txfifo = 64,
@@ -232,7 +232,7 @@ static struct fsl_qspi_devtype_data vybrid_data = {
.driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
};
-static struct fsl_qspi_devtype_data imx6sx_data = {
+static const struct fsl_qspi_devtype_data imx6sx_data = {
.devtype = FSL_QUADSPI_IMX6SX,
.rxfifo = 128,
.txfifo = 512,
@@ -241,7 +241,7 @@ static struct fsl_qspi_devtype_data imx6sx_data = {
| QUADSPI_QUIRK_TKT245618,
};
-static struct fsl_qspi_devtype_data imx7d_data = {
+static const struct fsl_qspi_devtype_data imx7d_data = {
.devtype = FSL_QUADSPI_IMX7D,
.rxfifo = 512,
.txfifo = 512,
@@ -250,7 +250,7 @@ static struct fsl_qspi_devtype_data imx7d_data = {
| QUADSPI_QUIRK_4X_INT_CLK,
};
-static struct fsl_qspi_devtype_data imx6ul_data = {
+static const struct fsl_qspi_devtype_data imx6ul_data = {
.devtype = FSL_QUADSPI_IMX6UL,
.rxfifo = 128,
.txfifo = 512,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165d7d66..da7cd69d4857 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -799,6 +799,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
@@ -825,6 +826,7 @@ static const struct flash_info spi_nor_ids[] = {
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
/* Fujitsu */
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
@@ -872,11 +874,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -905,7 +909,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -921,6 +925,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -1255,6 +1260,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 31f89f1c6123..a306de4318d7 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -33,7 +33,7 @@
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <net/route.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "ipddp.h" /* Our stuff */
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 26ba4b794a0b..7a85495dbb0c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -31,5 +31,4 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
-subdir-ccflags-y += -D__CHECK_ENDIAN__
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 52fe50725d74..4063215c9b54 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -390,7 +390,7 @@ static void softing_initialize_timestamp(struct softing *card)
ovf = 0x100000000ULL * 16;
do_div(ovf, card->pdat->freq ?: 16);
- card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+ card->ts_overflow = ktime_add_us(0, ovf);
}
ktime_t softing_raw2ktime(struct softing *card, u32 raw)
@@ -647,7 +647,7 @@ int softing_startstop(struct net_device *dev, int up)
open_candev(netdev);
if (dev != netdev) {
/* notify other busses on the restart */
- softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ softing_netdev_rx(netdev, &msg, 0);
++priv->can.can_stats.restarts;
}
netif_wake_queue(netdev);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7621f91a8a20..5f64deec9f6c 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -192,7 +192,7 @@ static int softing_handle_1(struct softing *card)
/* a dead bus has no overflows */
continue;
++netdev->stats.rx_over_errors;
- softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ softing_netdev_rx(netdev, &msg, 0);
}
/* prepare for other use */
memset(&msg, 0, sizeof(msg));
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9ec33b51a0ed..2ce7ae97ac91 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
else
- return mdiobus_read(priv->master_mii_bus, addr, regnum);
+ return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
@@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
else
- mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+ mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
return 0;
}
@@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct device_node *dn = pdev->dev.of_node;
struct b53_platform_data *pdata;
+ struct dsa_switch_ops *ops;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
if (!dev)
return -ENOMEM;
@@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
ds = dev->ds;
/* Override the parts that are non-standard wrt. normal b53 devices */
+ memcpy(ops, ds->ops, sizeof(*ops));
+ ds->ops = ops;
ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
ds->ops->setup = bcm_sf2_sw_setup;
ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 4da379f28d5d..f7222dc6581d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1775,6 +1775,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
+ if (!ds->ports[port].netdev)
+ continue;
+
if (vlan.data[i] ==
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
@@ -1783,6 +1786,9 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
chip->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
+ if (!chip->ports[i].bridge_dev)
+ continue;
+
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index a10ad74cc8d2..fe13bfea30ac 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -127,7 +127,7 @@
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index a7533780dddc..c7f9f2c77da7 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -88,7 +88,7 @@
#include <linux/eisa.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b9f4c463e516..e7b1fa56b290 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -72,7 +72,7 @@ static int max_interrupt_work = 20;
#include <linux/ethtool.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -627,6 +627,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
spin_lock_init(&vp->lock);
+ setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev);
+
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
@@ -707,6 +709,7 @@ static int corkscrew_open(struct net_device *dev)
{
int ioaddr = dev->base_addr;
struct corkscrew_private *vp = netdev_priv(dev);
+ bool armtimer = false;
__u32 config;
int i;
@@ -731,12 +734,7 @@ static int corkscrew_open(struct net_device *dev)
if (corkscrew_debug > 1)
pr_debug("%s: Initial media type %s.\n",
dev->name, media_tbl[dev->if_port].name);
-
- init_timer(&vp->timer);
- vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
- vp->timer.data = (unsigned long) dev;
- vp->timer.function = corkscrew_timer; /* timer handler */
- add_timer(&vp->timer);
+ armtimer = true;
} else
dev->if_port = vp->default_media;
@@ -776,6 +774,9 @@ static int corkscrew_open(struct net_device *dev)
return -EAGAIN;
}
+ if (armtimer)
+ mod_timer(&vp->timer, jiffies + media_tbl[dev->if_port].wait);
+
if (corkscrew_debug > 1) {
EL3WINDOW(4);
pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
@@ -1426,7 +1427,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer(&vp->timer);
+ del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 9359a37fedc0..47c844cc9d27 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -92,7 +92,7 @@ earlier 3Com products.
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/*====================================================================*/
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b3560a364e53..40196f41768a 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -92,7 +92,7 @@ static int vortex_debug = 1;
#include <linux/gfp.h>
#include <asm/irq.h> /* For nr_irqs only. */
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
This is only in the support-all-kernels source code. */
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index a0cacbe846ba..9fe3990319ec 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -119,7 +119,7 @@ static const int multicast_filter_limit = 32;
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 1d84a0544ace..3da1fc539ef9 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -46,7 +46,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define AXNET_CMD 0x00
#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 07355302443d..1bdea746926c 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -54,7 +54,7 @@ static int options[MAX_UNITS];
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "8390.h"
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 63079a6e20d9..bd0a2a14b649 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -49,7 +49,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PCNET_CMD 0x00
#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6e16e441f85e..e4c28fed61d5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -166,7 +166,6 @@ source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
-source "drivers/net/ethernet/sfc/falcon/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3aaad33cdbc6..c12d2618eebf 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -45,7 +45,7 @@
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/*
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 16f0c70266bc..a1a52eb53b14 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -80,7 +80,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRV_NAME "acenic"
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index 3eff2fd3997e..d4a187e45369 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
altera_msgdma.o altera_sgdma.o altera_utils.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 11cf1e3e0295..9595f1bc535b 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -87,7 +87,7 @@ Revision History:
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 113a3b3cc50c..b556c926557a 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -151,7 +151,7 @@ Include Files
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/* ----------------------------------------------------------------------------
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 155190db682d..9943629fcbf9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
+isr_done:
/* If there is not a separate AN irq, handle it here */
if (pdata->dev_irq == pdata->an_irq)
pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
pdata->i2c_if.i2c_isr(irq, pdata);
-isr_done:
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index b03e4f58d02e..a533a6cc2d53 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -122,7 +122,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+static u64 xgbe_cc_read(const struct cyclecounter *cc)
{
struct xgbe_prv_data *pdata = container_of(cc,
struct xgbe_prv_data,
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
index 5901fa407d52..ed4a605874a3 100644
--- a/drivers/net/ethernet/atheros/alx/Makefile
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -1,3 +1,2 @@
obj-$(CONFIG_ALX) += alx.o
alx-objs := main.o ethtool.o hw.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1df3048a3cdb..48707ed76ffc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -32,7 +32,7 @@
#include <linux/slab.h>
#include <linux/phy.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 25d1eb4933d0..744ed6ddaf37 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
- struct netdev_queue *txq;
u32 hw_ind;
- txq = netdev_get_tx_queue(ndev, ring->index);
-
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->c_index = c_index;
- if (netif_tx_queue_stopped(txq) && pkts_compl)
- netif_tx_wake_queue(txq);
-
netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
+ struct netdev_queue *txq;
unsigned int released;
unsigned long flags;
+ txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
+ if (released)
+ netif_tx_wake_queue(txq);
+
spin_unlock_irqrestore(&ring->lock, flags);
return released;
}
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@@ -1012,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* Insert TSB and checksum infos */
- if (priv->tsb_en) {
- skb = bcm_sysport_insert_tsb(skb, dev);
- if (!skb) {
- ret = NETDEV_TX_OK;
- goto out;
- }
- }
-
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1028,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*/
- if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
ret = NETDEV_TX_OK;
goto out;
}
- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
- ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+ skb = bcm_sysport_insert_tsb(skb, dev);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
@@ -1253,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
- bcm_sysport_tx_reclaim(priv, ring);
+ bcm_sysport_tx_clean(priv, ring);
kfree(ring->cbs);
ring->cbs = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 688617ac8c29..d8d06fdfc42b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15223,7 +15223,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d0863543..6fad22adbbb9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -585,7 +585,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
if (rc)
- BNX2X_ERR("Faled to set multicasts\n");
+ BNX2X_ERR("Failed to set multicasts\n");
} else {
/* clear existing mcasts */
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 185e9e047aa9..ae42de4fdddf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
+ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+ tg3_full_lock(tp, 0);
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
+ tg3_full_unlock(tp);
}
/*
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 4e5c3874a50f..bba81735ce87 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1676,10 +1676,10 @@ bna_cb_ioceth_reset(void *arg)
}
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
- bna_cb_ioceth_enable,
- bna_cb_ioceth_disable,
- bna_cb_ioceth_hbfail,
- bna_cb_ioceth_reset
+ .enable_cbfn = bna_cb_ioceth_enable,
+ .disable_cbfn = bna_cb_ioceth_disable,
+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
+ .reset_cbfn = bna_cb_ioceth_reset
};
static void bna_attr_init(struct bna_ioceth *ioceth)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index f0bcb15d3fec..608bea171956 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -31,4 +31,13 @@ config MACB
To compile this driver as a module, choose M here: the module
will be called macb.
+config MACB_PCI
+ tristate "Cadence PCI MACB/GEM support"
+ depends on MACB && PCI && COMMON_CLK
+ ---help---
+	  This is a PCI wrapper for the MACB driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb_pci.
+
endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 91f79b1f0505..4ba75594d5c5 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 538544a7c642..c0fb80acc2da 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -404,6 +404,8 @@ static int macb_mii_probe(struct net_device *dev)
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
}
+ } else {
+ phydev->irq = PHY_POLL;
}
/* attach the mac to the phy */
@@ -482,6 +484,9 @@ static int macb_mii_init(struct macb *bp)
goto err_out_unregister_bus;
}
} else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
if (pdata)
bp->mii_bus->phy_mask = pdata->phy_mask;
@@ -2523,16 +2528,24 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk)
{
+ struct macb_platform_data *pdata;
int err;
- *pclk = devm_clk_get(&pdev->dev, "pclk");
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ *pclk = pdata->pclk;
+ *hclk = pdata->hclk;
+ } else {
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
+ }
+
if (IS_ERR(*pclk)) {
err = PTR_ERR(*pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
- *hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(*hclk)) {
err = PTR_ERR(*hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
@@ -3107,15 +3120,23 @@ static const struct of_device_id macb_dt_ids[] = {
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
+static const struct macb_config default_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static int macb_probe(struct platform_device *pdev)
{
+ const struct macb_config *macb_config = &default_gem_config;
int (*clk_init)(struct platform_device *, struct clk **,
struct clk **, struct clk **, struct clk **)
- = macb_clk_init;
- int (*init)(struct platform_device *) = macb_init;
+ = macb_config->clk_init;
+ int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
- const struct macb_config *macb_config = NULL;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
new file mode 100644
index 000000000000..9906fda76087
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -0,0 +1,146 @@
+/**
+ * Cadence GEM PCI wrapper.
+ *
+ * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
+ *
+ * Authors: Rafal Ozieblo <rafalo@cadence.com>
+ * Bartosz Folta <bfolta@cadence.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include "macb.h"
+
+#define PCI_DRIVER_NAME "macb_pci"
+#define PLAT_DRIVER_NAME "macb"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0xe007
+
+#define GEM_PCLK_RATE 50000000
+#define GEM_HCLK_RATE 50000000
+
+static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int err;
+ struct platform_device *plat_dev;
+ struct platform_device_info plat_info;
+ struct macb_platform_data plat_data;
+ struct resource res[2];
+
+ /* enable pci device */
+ err = pcim_enable_device(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ /* set up resources */
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+ res[0].start = pci_resource_start(pdev, 0);
+ res[0].end = pci_resource_end(pdev, 0);
+ res[0].name = PCI_DRIVER_NAME;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pci_irq_vector(pdev, 0);
+ res[1].name = PCI_DRIVER_NAME;
+ res[1].flags = IORESOURCE_IRQ;
+
+ dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+ &res[0].start);
+
+ /* set up macb platform data */
+ memset(&plat_data, 0, sizeof(plat_data));
+
+ /* initialize clocks */
+ plat_data.pclk = clk_register_fixed_rate(&pdev->dev, "pclk", NULL, 0,
+ GEM_PCLK_RATE);
+ if (IS_ERR(plat_data.pclk)) {
+ err = PTR_ERR(plat_data.pclk);
+ goto err_pclk_register;
+ }
+
+ plat_data.hclk = clk_register_fixed_rate(&pdev->dev, "hclk", NULL, 0,
+ GEM_HCLK_RATE);
+ if (IS_ERR(plat_data.hclk)) {
+ err = PTR_ERR(plat_data.hclk);
+ goto err_hclk_register;
+ }
+
+ /* set up platform device info */
+ memset(&plat_info, 0, sizeof(plat_info));
+ plat_info.parent = &pdev->dev;
+ plat_info.fwnode = pdev->dev.fwnode;
+ plat_info.name = PLAT_DRIVER_NAME;
+ plat_info.id = pdev->devfn;
+ plat_info.res = res;
+ plat_info.num_res = ARRAY_SIZE(res);
+ plat_info.data = &plat_data;
+ plat_info.size_data = sizeof(plat_data);
+ plat_info.dma_mask = pdev->dma_mask;
+
+ /* register platform device */
+ plat_dev = platform_device_register_full(&plat_info);
+ if (IS_ERR(plat_dev)) {
+ err = PTR_ERR(plat_dev);
+ goto err_plat_dev_register;
+ }
+
+ pci_set_drvdata(pdev, plat_dev);
+
+ return 0;
+
+err_plat_dev_register:
+ clk_unregister(plat_data.hclk);
+
+err_hclk_register:
+ clk_unregister(plat_data.pclk);
+
+err_pclk_register:
+ return err;
+}
+
+static void macb_remove(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev = pci_get_drvdata(pdev);
+ struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
+
+ platform_device_unregister(plat_dev);
+ clk_unregister(plat_data->pclk);
+ clk_unregister(plat_data->hclk);
+}
+
+static struct pci_device_id dev_id_table[] = {
+ { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { 0, }
+};
+
+static struct pci_driver macb_pci_driver = {
+ .name = PCI_DRIVER_NAME,
+ .id_table = dev_id_table,
+ .probe = macb_probe,
+ .remove = macb_remove,
+};
+
+module_pci_driver(macb_pci_driver);
+MODULE_DEVICE_TABLE(pci, dev_id_table);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence NIC PCI wrapper");
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index bbc8bd16cb97..dcbce6cac63e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
config LIQUIDIO_VF
tristate "Cavium LiquidIO VF support"
depends on 64BIT && PCI_MSI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapter
based on CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 16e12c45904b..21f80f5744ba 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1469,6 +1469,12 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
p->agl_prt_ctl_size);
+ if (!p->mix || !p->agl || !p->agl_prt_ctl) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ result = -ENOMEM;
+ goto err;
+ }
+
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c750e064..2f85b64f01fa 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -47,8 +47,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
- int lmac_count;
+ u8 lmac_count;
u8 max_lmac;
+ u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
- bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
- bgx->lmac_count++;
return AE_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 81d1d0bc7553..d8aff7a4b3c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -44,7 +44,7 @@
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "cpl5_cmd.h"
#include "regs.h"
@@ -568,28 +568,33 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
+ u32 supported, advertising;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ supported = p->link_config.supported;
+ advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy->mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy->mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -628,36 +633,41 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg))
return -EOPNOTSUPP; /* can't change speed/duplex */
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- if (cmd->advertising & (cmd->advertising - 1))
- cmd->advertising = lc->supported;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ if (advertising & (advertising - 1))
+ advertising = lc->supported;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t1_link_start(p->phy, p->mac, lc);
return 0;
@@ -788,8 +798,6 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -807,6 +815,8 @@ static const struct ethtool_ops t1_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 092b3c16440b..d76491676b51 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -50,7 +50,7 @@
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "common.h"
#include "cxgb3_ioctl.h"
@@ -1801,27 +1801,31 @@ static int set_phys_id(struct net_device *dev,
return 0;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
+ u32 supported;
- cmd->supported = p->link_config.supported;
- cmd->advertising = p->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ p->link_config.supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ p->link_config.advertising);
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, p->link_config.speed);
- cmd->duplex = p->link_config.duplex;
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = p->phy.mdio.prtad;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = p->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy.mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
return 0;
}
@@ -1860,44 +1864,49 @@ static int speed_duplex_to_caps(int speed, int duplex)
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
ADVERTISED_10000baseT_Full)
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(lc->supported & SUPPORTED_Autoneg)) {
/*
* PHY offers a single speed/duplex. See if that's what's
* being requested.
*/
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (lc->supported & cap)
return 0;
}
return -EINVAL;
}
- if (cmd->autoneg == AUTONEG_DISABLE) {
- u32 speed = ethtool_cmd_speed(cmd);
- int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
lc->requested_speed = speed;
- lc->requested_duplex = cmd->duplex;
+ lc->requested_duplex = cmd->base.duplex;
lc->advertising = 0;
} else {
- cmd->advertising &= ADVERTISED_MASK;
- cmd->advertising &= lc->supported;
- if (!cmd->advertising)
+ advertising &= ADVERTISED_MASK;
+ advertising &= lc->supported;
+ if (!advertising)
return -EINVAL;
lc->requested_speed = SPEED_INVALID;
lc->requested_duplex = DUPLEX_INVALID;
- lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = cmd->base.autoneg;
if (netif_running(dev))
t3_link_start(&p->phy, &p->mac, lc);
return 0;
@@ -2097,8 +2106,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -2120,6 +2127,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
};
static int in_range(int val, int lo, int hi)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 66c37fac59b2..6f951877430b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -63,7 +63,7 @@
#include <net/addrconf.h>
#include <net/bonding.h>
#include <net/addrconf.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include "cxgb4.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..a267173f5997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_RX_ISCSI_CMP = 0x45,
CPL_TRACE_PKT_T5 = 0x48,
CPL_RX_ISCSI_DDP = 0x49,
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
__u8 status;
};
+struct cpl_rx_iscsi_cmp {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
struct cpl_tx_data_iso {
__be32 op_to_scsi;
__u8 reserved1;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b63622..d04a6c163445 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = sin6_scope_id;
dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!cxgb_our_interface(lldi, get_real_dev,
- ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ if (dst->error ||
+ (!cxgb_our_interface(lldi, get_real_dev,
+ ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
dst_release(dst);
- dst = NULL;
+ return NULL;
}
}
-out:
return dst;
}
EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index a1de0d12927d..396c88678eab 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -715,16 +715,18 @@ static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_gset(&ep->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
}
-static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ep93xx_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_sset(&ep->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
}
static int ep93xx_nway_reset(struct net_device *dev)
@@ -741,10 +743,10 @@ static u32 ep93xx_get_link(struct net_device *dev)
static const struct ethtool_ops ep93xx_ethtool_ops = {
.get_drvinfo = ep93xx_get_drvinfo,
- .get_settings = ep93xx_get_settings,
- .set_settings = ep93xx_set_settings,
.nway_reset = ep93xx_nway_reset,
.get_link = ep93xx_get_link,
+ .get_link_ksettings = ep93xx_get_link_ksettings,
+ .set_link_ksettings = ep93xx_set_link_ksettings,
};
static const struct net_device_ops ep93xx_netdev_ops = {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f1a81c52afe3..008dc8161775 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -570,19 +570,21 @@ static void dm9000_set_msglevel(struct net_device *dev, u32 value)
dm->msg_enable = value;
}
-static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- mii_ethtool_gset(&dm->mii, cmd);
+ mii_ethtool_get_link_ksettings(&dm->mii, cmd);
return 0;
}
-static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int dm9000_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct board_info *dm = to_dm9000_board(dev);
- return mii_ethtool_sset(&dm->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
}
static int dm9000_nway_reset(struct net_device *dev)
@@ -741,8 +743,6 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
static const struct ethtool_ops dm9000_ethtool_ops = {
.get_drvinfo = dm9000_get_drvinfo,
- .get_settings = dm9000_get_settings,
- .set_settings = dm9000_set_settings,
.get_msglevel = dm9000_get_msglevel,
.set_msglevel = dm9000_set_msglevel,
.nway_reset = dm9000_nway_reset,
@@ -752,6 +752,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
+ .get_link_ksettings = dm9000_get_link_ksettings,
+ .set_link_ksettings = dm9000_set_link_ksettings,
};
static void dm9000_show_carrier(struct board_info *db,
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 90c573b8ccaf..57c17e797ae3 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -49,7 +49,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
/* These identify the driver base version and may not be removed. */
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 51fda3a6b13f..df4a871df633 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -472,7 +472,7 @@
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#endif /* CONFIG_PPC_PMAC */
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index df4994919456..07e10a45beaa 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -90,7 +90,7 @@
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#ifdef CONFIG_TULIP_DM910X
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 5f1377449b8f..17e566a8b345 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -31,7 +31,7 @@
#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
#include <asm/prom.h>
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index e1c4133b8787..f82ebe5d89ee 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -40,7 +40,7 @@
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
#define ur32(reg) ioread32(ioaddr + (reg))
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index feda96d585e7..bc9bf88e5831 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -129,7 +129,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 19e4ea15b504..a8de79355578 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -30,7 +30,7 @@
#include <linux/delay.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#ifdef CONFIG_NET_POLL_CONTROLLER
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 8f4f61262d5c..5d8ae5320242 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -31,7 +31,7 @@
#include <linux/bitops.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/time.h>
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index eab36acfc0d1..2e5b66762e15 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -91,7 +91,7 @@ static char *media[MAX_UNITS];
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 57650953ff83..7bf78a0d322c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -253,7 +253,7 @@ static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
if (!netif_running(priv->net_dev))
return HRTIMER_NORESTART;
- hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+ hrtimer_forward_now(timer, polling_frequency);
return HRTIMER_RESTART;
}
@@ -427,8 +427,7 @@ static int ec_bhf_open(struct net_device *net_dev)
hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->hrtimer.function = ec_bhf_timer_fun;
- hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
- HRTIMER_MODE_REL);
+ hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 6cfa63a5e9b4..4c30c44b242e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -65,7 +65,7 @@
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN ((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
-#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)
+#define BE_RX_SKB_ALLOC_SIZE 256
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529a4209..30e855004c57 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7e1633bf5a22..1a7f8ad7b9c6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
/* Check if mac has already been added as part of uc-list */
for (i = 0; i < adapter->uc_macs; i++) {
- if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
- mac)) {
+ if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
/* mac already added, skip addition */
adapter->pmac_id[0] = adapter->pmac_id[i + 1];
return 0;
@@ -319,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
+	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
+ * address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -1655,14 +1661,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
- if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
- adapter->dev_mac)) {
+ if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
return 0;
}
- return be_cmd_pmac_add(adapter,
- (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
adapter->if_handle,
&adapter->pmac_id[uc_idx + 1], 0);
}
@@ -1698,9 +1702,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
}
if (adapter->update_uc_list) {
- i = 1; /* First slot is claimed by the Primary MAC */
-
/* cache the uc-list in adapter array */
+ i = 0;
netdev_for_each_uc_addr(ha, netdev) {
ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
i++;
@@ -3613,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT))
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3766,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
- /* For BE3 VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
@@ -5155,7 +5163,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
skb_inner_mac_header(skb) - skb_transport_header(skb) !=
- sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+ !adapter->vxlan_port ||
+ udp_hdr(skb)->dest != adapter->vxlan_port)
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 6967b287b6e7..9cb436cb3745 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -88,7 +88,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 4a13115155c9..c46df5c82af5 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,8 +4,6 @@
obj-$(CONFIG_FEC) += fec.o
fec-objs :=fec_main.o fec_ptp.o
-CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
-CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index f3a3454805f9..a654736237a9 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -1,6 +1,6 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
- depends on FSL_SOC && FSL_DPAA && FSL_FMAN
+ depends on FSL_DPAA && FSL_FMAN
select PHYLIB
select FSL_FMAN_MAC
---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3c48a84dec86..c9b7ad65e563 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,7 +733,8 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
/* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
/* Set different thresholds based on the MAC speed.
@@ -747,7 +748,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
cs_th = DPAA_CS_THRESHOLD_1G;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
@@ -896,18 +897,18 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->init) {
memset(&initfq, 0, sizeof(initfq));
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
/* Note: we may get to keep an empty FQ in cache */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
/* Try to reduce the number of portal interrupts for
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
/* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
@@ -920,8 +921,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (dpaa_fq->fq_type == FQ_TYPE_TX ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
/* Set a fixed overhead accounting, in an attempt to
* reduce the impact of fixed-size skb shells and the
@@ -932,7 +933,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* insufficient value, but even that is better than
* no overhead accounting at all.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -941,9 +942,9 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
}
if (td_enable) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+ initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
}
if (dpaa_fq->fq_type == FQ_TYPE_TX) {
@@ -951,7 +952,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (queue_id >= 0)
confq = priv->conf_fqs[queue_id];
if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
* A2V=1 (contextA A2 field is valid)
* A0V=1 (contextA A0 field is valid)
@@ -959,8 +961,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* ContextA A2: EBD=1 (deallocate buffers inside FMan)
* ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
*/
- initfq.fqd.context_a.hi = 0x1e000000;
- initfq.fqd.context_a.lo = 0x80000000;
+ qm_fqd_context_a_set64(&initfq.fqd,
+ 0x1e00000080000000ULL);
}
}
@@ -968,13 +970,13 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
if (priv->use_ingress_cgr &&
(dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
/* Set a fixed overhead accounting, just like for the
* egress CGR.
*/
- initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
qm_fqd_set_oal(&initfq.fqd,
min(sizeof(struct sk_buff) +
@@ -984,9 +986,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_HOLDACTIVE;
+ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1350,7 +1351,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
parse_result->l4_off = (u8)skb_transport_offset(skb);
/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
/* On P1023 and similar platforms fd->cmd interpretation could
* be disabled by setting CONTEXT_A bit ICMD; currently this bit
@@ -1732,7 +1733,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
/* Fill in the rest of the FD fields */
qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
addr = dma_map_single(dev, skbh,
@@ -1840,7 +1841,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
fd->bpid = FSL_DPAA_BPID_INV;
- fd->cmd |= FM_FD_CMD_FCO;
+ fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
qm_fd_addr_set64(fd, addr);
return 0;
@@ -1867,7 +1868,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
egress_fq = priv->egress_fqs[queue];
if (fd->bpid == FSL_DPAA_BPID_INV)
- fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+ fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
/* Trace this Tx fd */
trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
@@ -1960,17 +1961,17 @@ static void dpaa_rx_error(struct net_device *net_dev,
{
if (net_ratelimit())
netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_RX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
percpu_priv->stats.rx_errors++;
- if (fd->status & FM_FD_ERR_DMA)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
percpu_priv->rx_errors.dme++;
- if (fd->status & FM_FD_ERR_PHYSICAL)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
percpu_priv->rx_errors.fpe++;
- if (fd->status & FM_FD_ERR_SIZE)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
percpu_priv->rx_errors.fse++;
- if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+ if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
percpu_priv->rx_errors.phe++;
dpaa_fd_release(net_dev, fd);
@@ -1986,7 +1987,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
@@ -2020,10 +2021,11 @@ static void dpaa_tx_conf(struct net_device *net_dev,
{
struct sk_buff *skb;
- if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+ if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
if (net_ratelimit())
netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_TX_ERRORS);
+ be32_to_cpu(fd->status) &
+ FM_FD_STAT_TX_ERRORS);
percpu_priv->stats.tx_errors++;
}
@@ -2100,6 +2102,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
net_dev = ((struct dpaa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
@@ -2288,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
if (!net_dev->phydev) {
netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto phy_init_failed;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2311,6 +2316,7 @@ mac_start_failed:
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
fman_port_disable(mac_dev->port[i]);
+phy_init_failed:
dpaa_eth_napi_disable(priv);
return err;
@@ -2417,12 +2423,13 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
- initcgr.we_mask = QM_CGR_WE_CS_THRES;
+ memset(&initcgr, 0, sizeof(initcgr));
+ initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
initcgr.cgr.cstd_en = QM_CGR_EN;
/* This CGR will be associated with the SWP affined to the current CPU.
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f9e74461bdc0..6ebad3fac81d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -230,7 +230,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 79b7c84b7869..dc0850b3b517 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -1,6 +1,6 @@
config FSL_FMAN
tristate "FMan support"
- depends on FSL_SOC || COMPILE_TEST
+ depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
default n
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index dafd9e1baba2..f60845f0c6ca 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1890,6 +1890,7 @@ static int fman_reset(struct fman *fman)
goto _return;
} else {
+#ifdef CONFIG_PPC
struct device_node *guts_node;
struct ccsr_guts __iomem *guts_regs;
u32 devdisr2, reg;
@@ -1921,6 +1922,7 @@ static int fman_reset(struct fman *fman)
/* Enable all MACs */
iowrite32be(reg, &guts_regs->devdisr2);
+#endif
/* Perform FMan reset */
iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
@@ -1932,25 +1934,31 @@ static int fman_reset(struct fman *fman)
} while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
FPM_RSTC_FM_RESET) && --count);
if (count == 0) {
+#ifdef CONFIG_PPC
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
err = -EBUSY;
goto _return;
}
+#ifdef CONFIG_PPC
/* Restore devdisr2 value */
iowrite32be(devdisr2, &guts_regs->devdisr2);
iounmap(guts_regs);
of_node_put(guts_node);
+#endif
goto _return;
+#ifdef CONFIG_PPC
guts_regs:
of_node_put(guts_node);
guts_node:
dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
__func__);
+#endif
}
_return:
return err;
@@ -2868,6 +2876,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dev = &of_dev->dev;
+ err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+ __func__);
+ goto fman_free;
+ }
+
return fman;
fman_node_put:
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 69ca42ce5dd5..0b31f8502ada 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -594,6 +594,7 @@ static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
[PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
+ [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
[PHY_INTERFACE_MODE_XGMII] = SPEED_10000
};
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index d9f3a480ca1b..1f98838f32b7 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -44,7 +44,7 @@
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "fs_enet.h"
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 120c758f5d01..6e64989f8478 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -42,7 +42,7 @@
#include <asm/pgtable.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "fs_enet.h"
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 777beffa1e1e..db9c0bcf54cd 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -36,7 +36,7 @@
#include <linux/gfp.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 15abd37d70e3..96d44cf44fe0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -35,7 +35,7 @@
#include <linux/of_platform.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index a89267b94352..1582d82483ec 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -35,7 +35,7 @@
#include <asm/pgtable.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/mpc5xxx.h>
#include "fs_enet.h"
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 756f7e763d5f..a6e7afa878be 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -93,7 +93,7 @@
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 6e8a9c8467b9..5aa814799d70 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 56588f2e1d91..a93e0199c369 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 53c5fcf1436c..9d660888510f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -37,7 +37,7 @@
#include <linux/of_net.h>
#include <linux/of_platform.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 8ba636f61b50..b642990b549c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/types.h>
#include "ucc_geth.h"
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 51c4abc51bf4..a69cd19a55ae 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -54,7 +54,7 @@
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/*====================================================================*/
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 854befde0a08..97b184774784 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -828,6 +828,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(d, res);
@@ -903,7 +904,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hip04_reset_ppe(priv);
if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 49863068c59e..979852d56f31 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -805,6 +805,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
priv->dev = dev;
@@ -882,7 +883,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &hisi_femac_netdev_ops;
ndev->ethtool_ops = &hisi_femac_ethtools_ops;
netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
- SET_NETDEV_DEV(ndev, &pdev->dev);
hisi_femac_port_init(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index ee7e9ce2f5b3..418ca1f3774a 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1316,10 +1316,11 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id hix5hd2_of_match[] = {
- { .compatible = "hisilicon,hisi-gemac-v1", .data = (void *)GEMAC_V1 },
- { .compatible = "hisilicon,hisi-gemac-v2", .data = (void *)GEMAC_V2 },
- { .compatible = "hisilicon,hix5hd2-gemac", .data = (void *)GEMAC_V1 },
- { .compatible = "hisilicon,hi3798cv200-gemac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
{},
};
@@ -1327,7 +1328,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
static struct platform_driver hix5hd2_dev_driver = {
.driver = {
- .name = "hisi-gemac",
+ .name = "hisi-gmac",
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
@@ -1338,4 +1339,4 @@ module_platform_driver(hix5hd2_dev_driver);
MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hisi-gemac");
+MODULE_ALIAS("platform:hisi-gmac");
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 52a69c925965..5909615c27f7 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -47,7 +47,7 @@
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index fbece63395a8..a831f947ca8c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@ map_failed:
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
+ struct tcphdr *tcph;
int offset = 0;
+ int hdr_len;
/* only TCP packets will be aggregated */
if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
/* if mss is not set through Large Packet bit/mss in rx buffer,
* expect that the mss will be written to the tcp header checksum.
*/
+ tcph = (struct tcphdr *)(skb->data + offset);
if (lrg_pkt) {
skb_shinfo(skb)->gso_size = mss;
} else if (offset) {
- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
-
skb_shinfo(skb)->gso_size = ntohs(tcph->check);
tcph->check = 0;
}
+
+ if (skb_shinfo(skb)->gso_size) {
+ hdr_len = offset + tcph->doff * 4;
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+ }
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
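The gso_segs computation added to ibmveth_rx_mss_helper() above is just a ceiling division of the TCP payload by the MSS. A minimal userspace sketch of that arithmetic, with DIV_ROUND_UP spelled out locally and made-up lengths (not values taken from the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len = 9014;   /* illustrative aggregated packet length */
	unsigned int hdr_len = 54;     /* offset + tcph->doff * 4 in the driver */
	unsigned int gso_size = 1448;  /* MSS recovered from the rx buffer */

	unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

	printf("gso_segs = %u\n", gso_segs); /* (9014 - 54 + 1447) / 1448 = 7 */
	return 0;
}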
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ffcf35af4881..eccf1da9356b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4305,24 +4305,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
- * @systim: cycle_t value read, sanitized and returned
+ * @systim: time value read, sanitized and returned
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
-static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
{
u64 time_delta, rem, temp;
- cycle_t systim_next;
+ u64 systim_next;
u32 incvalue;
int i;
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
- systim_next = (cycle_t)er32(SYSTIML);
- systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+ systim_next = (u64)er32(SYSTIML);
+ systim_next |= (u64)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
temp = time_delta;
@@ -4342,13 +4342,13 @@ static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
-static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
{
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
u32 systimel, systimeh;
- cycle_t systim;
+ u64 systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4368,8 +4368,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
systimel = systimel_2;
}
}
- systim = (cycle_t)systimel;
- systim |= (cycle_t)systimeh << 32;
+ systim = (u64)systimel;
+ systim |= (u64)systimeh << 32;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
systim = e1000e_sanitize_systim(hw, systim);
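The hunk above handles SYSTIML wrapping between the two 32-bit register reads. A rough userspace illustration of that retry idea, using a fake counter and local helper names (it re-reads both halves on a detected wrap, a slight variation on the driver's exact sequence, which keeps the second SYSTIML read):

#include <stdint.h>
#include <stdio.h>

/* Fake free-running 64-bit counter exposed as two 32-bit halves; every
 * register access makes time pass, like SYSTIML/SYSTIMH reads on the bus. */
static uint64_t fake_counter = 0xfffffffeULL;

static uint32_t reg_read(int high)
{
	fake_counter++;	/* time advances between accesses */
	return high ? (uint32_t)(fake_counter >> 32) : (uint32_t)fake_counter;
}

/* Read low, high, then low again; if the low half went backwards, the
 * counter wrapped between the reads, so take a fresh pair instead. */
static uint64_t read_split_counter(void)
{
	uint32_t lo = reg_read(0);
	uint32_t hi = reg_read(1);
	uint32_t lo2 = reg_read(0);

	if (lo2 < lo) {
		lo = lo2;
		hi = reg_read(1);
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)read_split_counter());
	return 0;
}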
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index ad03763e009a..34cc3be0df8e 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -127,8 +127,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
unsigned long flags;
int i;
u32 tsync_ctrl;
- cycle_t dev_cycles;
- cycle_t sys_cycles;
+ u64 dev_cycles;
+ u64 sys_cycles;
tsync_ctrl = er32(TSYNCTXCTL);
tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d11093dce1b9..acbc3abe2ddd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
struct igb_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct igb_tx_queue_stats {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cae24a8ccf47..1515abaa5ac9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3947,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (!buffer_info->page)
continue;
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buffer_info->page,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -6812,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
/* transfer page from old buffer to new buffer */
*new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6829,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
return false;
/* flip page offset to other buffer */
@@ -6848,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
return false;
#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- page_ref_inc(page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
@@ -6903,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- __free_page(page);
return false;
}
@@ -6938,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -6962,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
@@ -7234,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -7249,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
return true;
}
@@ -7275,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
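The pagecnt_bias scheme introduced in the igb hunks above takes a large batch of page references up front (page_ref_add(page, USHRT_MAX)) and hands them out one per RX use, so the page can be recycled as long as the driver still owns every outstanding reference. A toy, single-threaded sketch of that bookkeeping; plain counters stand in for the kernel's page refcount and all names (toy_page, toy_can_reuse, ...) are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

struct toy_page {
	unsigned int refcount;        /* stands in for page_ref_count() */
};

struct toy_rx_buffer {
	struct toy_page *page;
	unsigned short pagecnt_bias;  /* references the driver still holds */
};

/* Allocate: take one reference and remember it in the bias. */
static void toy_alloc(struct toy_rx_buffer *buf, struct toy_page *page)
{
	page->refcount = 1;
	buf->page = page;
	buf->pagecnt_bias = 1;
}

/* Reuse test mirroring the batched-refcount check in
 * igb_can_reuse_rx_page(): the page is reusable only if every live
 * reference is one the driver handed out itself. */
static bool toy_can_reuse(struct toy_rx_buffer *buf)
{
	unsigned int bias = buf->pagecnt_bias--;

	if (buf->page->refcount != bias)
		return false;

	/* Restock the local pool once it is drained. */
	if (bias == 1) {
		buf->page->refcount += USHRT_MAX;
		buf->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	struct toy_page page;
	struct toy_rx_buffer buf;

	toy_alloc(&buf, &page);
	printf("reuse #1: %d\n", toy_can_reuse(&buf)); /* 1: restocks the pool */
	page.refcount++;                               /* someone else takes a ref */
	printf("reuse #2: %d\n", toy_can_reuse(&buf)); /* 0: page not exclusively ours */
	return 0;
}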
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c30eea8399a7..c4477552ce9e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -77,7 +77,7 @@
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
-static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -94,7 +94,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
}
/* SYSTIM read access for the 82580 */
-static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index d2b29b490ae0..e5d72559cca9 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -30,7 +30,7 @@
#include "ixgb.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define IXGB_ALL_RAR_ENTRIES 16
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index a92277683a64..1efb404431e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -245,7 +245,7 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32 bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
+static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
{
struct ixgbe_adapter *adapter =
container_of(hw_cc, struct ixgbe_adapter, hw_cc);
@@ -282,7 +282,7 @@ static cycle_t ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static cycle_t ixgbe_ptp_read_82599(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index cbeea915f026..8037426ec50f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5f62c3d70df9..1fa7c03edec2 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2713,7 +2713,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif
-#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
+#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v) \
do { \
u32 tmp; \
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index dabc5418efcc..4fe430ceb194 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -770,6 +770,17 @@ struct mvpp2_rx_desc {
u32 reserved8;
};
+struct mvpp2_txq_pcpu_buf {
+ /* Transmitted SKB */
+ struct sk_buff *skb;
+
+ /* Physical address of transmitted buffer */
+ dma_addr_t phys;
+
+ /* Size transmitted */
+ size_t size;
+};
+
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
int cpu;
@@ -785,11 +796,8 @@ struct mvpp2_txq_pcpu {
/* Number of Tx DMA descriptors reserved for each CPU */
int reserved_num;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
-
- /* Array of transmitted buffers' physical addresses */
- dma_addr_t *tx_buffs;
+ /* Info about transmitted buffers */
+ struct mvpp2_txq_pcpu_buf *buffs;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -979,10 +987,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{
- txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
- if (skb)
- txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
- tx_desc->buf_phys_addr;
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
+ tx_buf->skb = skb;
+ tx_buf->size = tx_desc->data_size;
+ tx_buf->phys = tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -4401,17 +4410,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- dma_addr_t buf_phys_addr =
- txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
- struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
mvpp2_txq_inc_get(txq_pcpu);
- dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (!skb)
+ dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ tx_buf->size, DMA_TO_DEVICE);
+ if (!tx_buf->skb)
continue;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(tx_buf->skb);
}
}
@@ -4651,15 +4659,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
txq_pcpu->size = txq->size;
- txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
- sizeof(*txq_pcpu->tx_skb),
- GFP_KERNEL);
- if (!txq_pcpu->tx_skb)
- goto error;
-
- txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
- sizeof(dma_addr_t), GFP_KERNEL);
- if (!txq_pcpu->tx_buffs)
+ txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+ sizeof(struct mvpp2_txq_pcpu_buf),
+ GFP_KERNEL);
+ if (!txq_pcpu->buffs)
goto error;
txq_pcpu->count = 0;
@@ -4673,8 +4676,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
error:
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->tx_skb);
- kfree(txq_pcpu->tx_buffs);
+ kfree(txq_pcpu->buffs);
}
dma_free_coherent(port->dev->dev.parent,
@@ -4693,8 +4695,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->tx_skb);
- kfree(txq_pcpu->tx_buffs);
+ kfree(txq_pcpu->buffs);
}
if (txq->descs)
@@ -4912,7 +4913,7 @@ static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
if (!port_pcpu->timer_scheduled) {
port_pcpu->timer_scheduled = true;
- interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
hrtimer_start(&port_pcpu->tx_done_timer, interval,
HRTIMER_MODE_REL_PINNED);
}
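The mvpp2 change above folds two parallel per-CPU arrays (tx_skb[] and tx_buffs[]) into a single array of struct mvpp2_txq_pcpu_buf, so the skb, its DMA address and its size stay together and only one allocation is needed. A minimal sketch of that refactor pattern; struct txq_buf and the field types below are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

/* After the refactor: one element keeps everything that describes a
 * transmitted buffer, instead of spreading it across parallel arrays. */
struct txq_buf {
	void *skb;            /* transmitted packet, may be NULL for fragments */
	unsigned long phys;   /* DMA address of the buffer */
	size_t size;          /* bytes handed to the hardware */
};

int main(void)
{
	size_t ring_size = 256;

	/* One allocation replaces the separate tx_skb[] and tx_buffs[] buys. */
	struct txq_buf *buffs = calloc(ring_size, sizeof(*buffs));
	if (!buffs)
		return 1;

	/* Fill one slot the way mvpp2_txq_inc_put() does. */
	buffs[0].skb = NULL;
	buffs[0].phys = 0x1000;
	buffs[0].size = 1514;

	printf("slot 0: phys=0x%lx size=%zu\n", buffs[0].phys, buffs[0].size);
	free(buffs);
	return 0;
}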
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index a5fc46bbcbe2..504461a464c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -38,7 +38,7 @@
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
-static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
+static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
struct mlx4_en_dev *mdev =
container_of(tc, struct mlx4_en_dev, cycles);
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
- u64 tmp_rounded =
- roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
- roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
- u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : tmp_rounded;
+ u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
- u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+ u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
/* This comes from the reverse of clocksource_khz2mult */
return ilog2(div_u64(max_mul * freq_khz, 1000000));
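The freq_to_shift() hunk above replaces the two-step roundup_pow_of_two() dance with a single 1ULL << fls64(max_val_cycles - 1). A small userspace sketch of that rounding, using __builtin_clzll as a stand-in for the kernel's fls64(); fls64_sketch() and roundup_pow2() are local illustrative helpers:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's fls64(): 1-based index of the
 * highest set bit, 0 for an input of 0. */
static int fls64_sketch(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* Round v (v >= 1) up to the nearest power of two, exactly what
 * 1ULL << fls64(v - 1) computes in the patched freq_to_shift(). */
static uint64_t roundup_pow2(uint64_t v)
{
	return 1ULL << fls64_sketch(v - 1);
}

int main(void)
{
	/* 5 -> 8, 8 -> 8, 1000 -> 1024 */
	printf("%llu %llu %llu\n",
	       (unsigned long long)roundup_pow2(5),
	       (unsigned long long)roundup_pow2(8),
	       (unsigned long long)roundup_pow2(1000));
	return 0;
}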
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bcd955339058..761f8b12399c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
- u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
+ u8 num_tx_rings_p_up = t == TX ?
+ priv->num_tx_rings_p_up : priv->tx_ring_num[t];
for (i = 0; i < priv->tx_ring_num[t]; i++) {
/* Configure cq */
@@ -1747,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process any pending completions to prevent
* the queues from freezing if they are full
*/
- for (i = 0; i < priv->rx_ring_num; i++)
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
+ local_bh_enable();
+ }
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2276,7 +2280,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (priv->tx_ring_num[TX_XDP] &&
!mlx4_en_check_xdp_mtu(dev, new_mtu))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dev->mtu = new_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 3c37e216bbf3..eac527e25ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..0509996957d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 2a9dd460a95f..e1f9e7cebf8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
if (!buf)
return -ENOMEM;
+ if (offset_in_page(buf)) {
+ dma_free_coherent(dev, PAGE_SIZE << order,
+ buf, sg_dma_address(mem));
+ return -ENOMEM;
+ }
+
sg_set_buf(mem, buf, PAGE_SIZE << order);
- BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75d07fa9d0b1..bffa6f345f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <linux/etherdevice.h>
#include <net/devlink.h>
#include <linux/mlx4/device.h>
@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
}
EXPORT_SYMBOL(mlx4_is_slave_active);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
+
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *hca_param)
@@ -1823,10 +1841,10 @@ static void unmap_bf_area(struct mlx4_dev *dev)
io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
-cycle_t mlx4_read_clock(struct mlx4_dev *dev)
+u64 mlx4_read_clock(struct mlx4_dev *dev)
{
u32 clockhi, clocklo, clockhi1;
- cycle_t cycles;
+ u64 cycles;
int i;
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -4020,49 +4038,51 @@ int mlx4_restart_one(struct pci_dev *pdev)
return err;
}
+#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
+#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
+#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
+
static const struct pci_device_id mlx4_pci_table[] = {
- /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ /* MT25408 "Hermon" */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
+ /* MT25458 ConnectX EN 10GBASE-T */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
+ /* MT26468 ConnectX EN 10GigE PCIe Gen2*/
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
+ /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
+ /* MT26478 ConnectX2 40GigE PCIe Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
+ /* MT25400 Family [ConnectX-2] */
+ MLX_VF(0x1002), /* Virtual Function */
/* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
- /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
- { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
+ MLX_VF(0x1004), /* Virtual Function */
+ MLX_GN(0x1005), /* MT27510 Family */
+ MLX_GN(0x1006), /* MT27511 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
+ MLX_GN(0x1008), /* MT27521 Family */
+ MLX_GN(0x1009), /* MT27530 Family */
+ MLX_GN(0x100a), /* MT27531 Family */
+ MLX_GN(0x100b), /* MT27540 Family */
+ MLX_GN(0x100c), /* MT27541 Family */
+ MLX_GN(0x100d), /* MT27550 Family */
+ MLX_GN(0x100e), /* MT27551 Family */
+ MLX_GN(0x100f), /* MT27560 Family */
+ MLX_GN(0x1010), /* MT27561 Family */
+
+ /*
+ * See the mellanox_check_broken_intx_masking() quirk when
+ * adding devices
+ */
+
{ 0, }
};
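mlx4_handle_eth_header_mcast_prio(), exported in the main.c hunk above, keys off whether the rule's destination MAC is multicast or broadcast. A standalone sketch of those two classic tests (multicast: bit 0 of the first octet set; broadcast: all ones), using local helpers rather than the kernel's etherdevice.h ones:

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Multicast: the least significant bit of the first octet is set. */
static bool mac_is_multicast(const unsigned char *mac)
{
	return mac[0] & 0x01;
}

/* Broadcast: all six octets are 0xff (also counts as multicast). */
static bool mac_is_broadcast(const unsigned char *mac)
{
	for (int i = 0; i < ETH_ALEN; i++)
		if (mac[i] != 0xff)
			return false;
	return true;
}

int main(void)
{
	const unsigned char ucast[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned char mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d %d\n", mac_is_multicast(ucast),
	       mac_is_multicast(mcast), mac_is_broadcast(bcast));
	return 0;
}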
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c548beaaf910..1822382212ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
@@ -4164,22 +4165,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
- struct _rule_hw *eth_header)
-{
- if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
- is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
- struct mlx4_net_trans_rule_hw_eth *eth =
- (struct mlx4_net_trans_rule_hw_eth *)eth_header;
- struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
- bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
- next_rule->rsvd == 0;
-
- if (last_rule)
- ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
- }
-}
-
/*
* If the eth header is missing, append an eth header with a MAC address
* assigned to the VF.
@@ -4363,10 +4348,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
- handle_eth_header_mcast_prio(ctrl, rule_header);
-
- if (slave == dev->caps.function)
- goto execute;
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -4394,7 +4376,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put_qp;
}
-execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4473,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct res_qp *rqp;
struct res_fs_rule *rrule;
u64 mirr_reg_id;
+ int qpn;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4489,10 +4471,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
}
mirr_reg_id = rrule->mirr_rule_id;
kfree(rrule->mirr_mbox);
+ qpn = rrule->qpn;
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
- err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
@@ -4517,7 +4500,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
if (!err)
atomic_dec(&rqp->ref_count);
out:
- put_res(dev, slave, rrule->qpn, RES_QP);
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2cd8e56a573b..746a92c13644 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -49,7 +49,7 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
hwts->hwtstamp = ns_to_ktime(nsec);
}
-static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc)
+static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
cycles);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 7f6c225666c1..f0b460f47f29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
int i;
struct ieee_ets ets;
+ if (!MLX5_CAP_GEN(priv->mdev, ets))
+ return;
+
memset(&ets, 0, sizeof(ets));
ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets.ets_cap; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 352462af8d51..33a399a8b5d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
- NUM_PCIE_COUNTERS +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_tas_stats_desc[i].format);
-
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
- pcie_tas_stats_desc, i);
-
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3691451c728c..d088effd7160 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
}
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
+ mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, dmac_47_16),
fs->m_ext.h_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbfa38fc72c0..2b7dd315020c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
- u32 *in;
-
- in = mlx5_vzalloc(sz);
- if (!in)
- return;
-
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- out = pcie_stats->pcie_tas_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- kvfree(in);
-}
-
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
mlx5e_update_q_counter(priv);
mlx5e_update_vport_counters(priv);
mlx5e_update_pport_counters(priv);
mlx5e_update_sw_counters(priv);
- mlx5e_update_pcie_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
@@ -3699,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
mlx5e_vxlan_cleanup(priv);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5_eswitch_unregister_vport_rep(esw, 0);
-
if (priv->xdp_prog)
bpf_prog_put(priv->xdp_prog);
}
@@ -3805,14 +3775,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
-
mlx5e_enable_async_events(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,13 +3785,30 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rep.netdev = netdev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
+
+ if (netdev->reg_state != NETREG_REGISTERED)
+ return;
+
+ /* Device already registered: sync netdev system state */
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
+ }
+
+ queue_work(priv->wq, &priv->set_rx_mode_work);
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(priv->mdev);
+ mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3966,10 +3946,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- if (profile->disable)
- profile->disable(priv);
-
- flush_workqueue(priv->wq);
rtnl_lock();
if (netif_running(netdev))
@@ -3977,6 +3953,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
netif_device_detach(netdev);
rtnl_unlock();
+ if (profile->disable)
+ profile->disable(priv);
+ flush_workqueue(priv->wq);
+
mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 1fffe48a93cc..cbfac06b7ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
return true;
case MLX5E_AM_GOING_RIGHT:
return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
u32 delta_us = ktime_us_delta(end->time, start->time);
unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
- if (!delta_us) {
- WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ if (!delta_us)
return;
- }
curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f202f872f57f..ba5db1dd23a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
- be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+ be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
- MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
- counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
- MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
- counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
- __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
- __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
- { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
- { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc)
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
NUM_PPORT_2819_COUNTERS + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -406,7 +377,6 @@ struct mlx5e_stats {
struct mlx5e_qcounter_stats qcnt;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
- struct mlx5e_pcie_stats pcie;
struct rtnl_link_stats64 vf_vport;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f8829b517156..46bef6a26a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
}
+/* We also get here when setting the rule to the FW failed, etc. In that case
+ * the flow rule itself might not exist, but some offloading related to the
+ * actions should still be cleaned up.
+ */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule);
-
- mlx5_del_flow_rules(flow->rule);
+ if (!IS_ERR(flow->rule)) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
if (esw && esw->mode == SRIOV_OFFLOADS) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5e_detach_encap(priv, flow);
}
- mlx5_fc_destroy(priv->mdev, counter);
-
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ struct flow_dissector_key_control *enc_control =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ f->key);
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
- return -EOPNOTSUPP;
-
- /* udp src port isn't supported */
- if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
- return -EOPNOTSUPP;
+ goto vxlan_match_offload_err;
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
- else
+ else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_dport, ntohs(mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_dport, ntohs(key->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
- return -EOPNOTSUPP;
+vxlan_match_offload_err:
+ netdev_warn(priv->netdev,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+ return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
ntohl(key->dst));
- }
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ }
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (parse_tunnel_attr(priv, spec, f))
return -EOPNOTSUPP;
break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ netdev_warn(priv->netdev,
+ "IPv6 tunnel decap offload isn't supported\n");
default:
return -EOPNOTSUPP;
}
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (key->flags & FLOW_DIS_IS_FRAGMENT)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
}
@@ -646,18 +668,18 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int ttl;
#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- if (IS_ERR(rt)) {
- pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
- return -EOPNOTSUPP;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
#else
return -EOPNOTSUPP;
#endif
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
- __func__);
+ pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
ip_rt_put(rt);
return -EOPNOTSUPP;
}
@@ -718,8 +740,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct neighbour *n = NULL;
struct flowi4 fl4 = {};
- struct neighbour *n;
char *encap_header;
int encap_size;
__be32 saddr;
@@ -750,7 +772,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
e->out_dev = *out_dev;
if (!(n->nud_state & NUD_VALID)) {
- err = -ENOTSUPP;
+ pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+ err = -EOPNOTSUPP;
goto out;
}
@@ -772,6 +795,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
encap_size, encap_header, &e->encap_id);
out:
+ if (err && n)
+ neigh_release(n);
kfree(encap_header);
return err;
}
@@ -792,9 +817,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
int tunnel_type;
int err;
- /* udp dst port must be given */
+ /* udp dst port must be set */
if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+ goto vxlan_encap_offload_err;
+
+ /* setting udp src port isn't supported */
+ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+ netdev_warn(priv->netdev,
+ "must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
+ }
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +835,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
info.tun_id = tunnel_id_to_key32(key->tun_id);
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
}
@@ -809,6 +844,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
case AF_INET:
info.daddr = key->u.ipv4.dst;
break;
+ case AF_INET6:
+ netdev_warn(priv->netdev,
+ "IPv6 tunnel encap offload isn't supported\n");
default:
return -EOPNOTSUPP;
}
@@ -986,7 +1024,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_free;
+ goto err_del_rule;
}
err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1035,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto out;
err_del_rule:
- mlx5_del_flow_rules(flow->rule);
+ mlx5e_tc_del_flow(priv, flow);
err_free:
kfree(flow);
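The parse_tunnel_attr() changes earlier in this file offload VXLAN decap only when the filter carries an exact UDP destination port: the mask must be all-ones and the port must be a registered VXLAN dport, otherwise the rule falls back to software with a netdev_warn(). A minimal user-space sketch of the exact-mask test; mask_is_exact() is an illustrative stand-in for the kernel's memchr_inv(&mask->dst, 0xff, ...) idiom, not driver code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True only when every byte of the mask is 0xff, i.e. the classifier
 * asked for an exact match; wildcard or partial masks are rejected,
 * mirroring the memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)) test.
 */
static bool mask_is_exact(const void *mask, size_t len)
{
	const uint8_t *p = mask;

	for (size_t i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;
	return true;
}

int main(void)
{
	uint16_t full = 0xffff;     /* exact UDP dport match: offloadable  */
	uint16_t partial = 0x00ff;  /* partial mask: falls back to software */

	printf("full:    %d\n", mask_is_exact(&full, sizeof(full)));
	printf("partial: %d\n", mask_is_exact(&partial, sizeof(partial)));
	return 0;
}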
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d6807c3cc461..f14d9c9ba773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
+ if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
mutex_lock(&esw->state_lock);
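The added is_multicast_ether_addr() check refuses to program a multicast (or broadcast) MAC for a VF. The test comes down to the I/G bit, bit 0 of the first address octet; a standalone illustration with a made-up helper name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A MAC address is multicast when the I/G bit (bit 0 of the first
 * octet) is set; the all-ones broadcast address is caught by the
 * same test, so both are rejected for a VF.
 */
static bool mac_is_multicast(const uint8_t addr[6])
{
	return addr[0] & 0x01;
}

int main(void)
{
	uint8_t unicast[6]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("unicast:   %d\n", mac_is_multicast(unicast));   /* 0 */
	printf("broadcast: %d\n", mac_is_multicast(broadcast)); /* 1 */
	return 0;
}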
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 466e161010f7..03293ed1cc22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto err_reps;
}
+
+ /* disable PF RoCE so missed packets don't go through RoCE steering */
+ mlx5_dev_list_lock();
+ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return 0;
err_reps:
@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a263d8904a4c..0ac7a2fc916c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
handle = add_rule_fte(fte, fg, dest, dest_num, false);
if (IS_ERR(handle)) {
+ unlock_ref_node(&fte->node);
kfree(fte);
goto unlock_fg;
}
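The fs_core.c fix releases the nested ref-node lock before freeing the FTE on the add_rule_fte() error path; previously the lock stayed held on an object that was immediately kfree()d. The same bug shape, sketched with an ordinary pthread mutex embedded in the object (illustrative only, not the mlx5 locking API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	pthread_mutex_t lock;
	int state;
};

/* On failure the lock must be released *before* the object is freed;
 * freeing while the lock is still held (the old error path) leaks the
 * lock and frees a locked object.
 */
static int add_entry(struct entry *e)
{
	pthread_mutex_lock(&e->lock);

	if (e->state < 0) {                      /* failure while locked */
		pthread_mutex_unlock(&e->lock);  /* release first ...    */
		free(e);                         /* ... then dispose     */
		return -1;
	}

	e->state++;
	pthread_mutex_unlock(&e->lock);
	return 0;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	pthread_mutex_init(&e->lock, NULL);
	printf("add_entry() = %d\n", add_entry(e));  /* 0, success path */
	pthread_mutex_destroy(&e->lock);
	free(e);
	return 0;
}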
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3b026c151cf2..7431f633de31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -75,7 +75,7 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
struct rb_node *parent = NULL;
while (*new) {
- struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+ struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
int result = counter->id - this->id;
parent = *new;
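rb_entry() is simply the red-black-tree spelling of container_of(), so the fs_counters.c change is cosmetic but states intent. A self-contained demonstration; the macros below are reduced, stand-alone versions of the kernel ones:

#include <stddef.h>
#include <stdio.h>

/* Reduced, stand-alone versions of the kernel macros; rb_entry() in
 * the kernel is literally defined as container_of().
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

struct rb_node { struct rb_node *left, *right; };

struct counter {
	unsigned int id;
	struct rb_node node;     /* embedded tree linkage */
};

int main(void)
{
	struct counter c = { .id = 42 };
	struct rb_node *n = &c.node;

	/* Recover the enclosing counter from its embedded rb_node. */
	struct counter *back = rb_entry(n, struct counter, node);

	printf("id = %u\n", back->id);
	return 0;
}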
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7b4c339a8a9a..d01e9f21d469 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
to_fw_pkey_sz(dev, 128));
+ /* Check log_max_qp from HCA caps to set in current profile */
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+ profile[prof_sel].log_max_qp,
+ MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ }
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
prof->log_max_qp);
@@ -557,7 +564,7 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
u32 timer_h, timer_h1, timer_l;
@@ -567,7 +574,7 @@ cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
if (timer_h != timer_h1) /* wrap around */
timer_l = ioread32be(&dev->iseg->internal_timer_l);
- return (cycle_t)timer_l | (cycle_t)timer_h1 << 32;
+ return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
return -ENOMEM;
}
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -1189,6 +1195,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
{
int err = 0;
+ if (cleanup)
+ mlx5_drain_health_wq(dev);
+
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1351,7 +1360,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state and drain health wq */
+ /* In case of kernel call save the pci state and drain the health wq */
if (state) {
pci_save_state(pdev);
mlx5_drain_health_wq(dev);
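mlx5_read_internal_timer() now returns a plain u64 built from two 32-bit registers; the high word is read before and after the low word so a carry between the two reads is caught and the low word re-read. A user-space rendering of the same read ordering, with ordinary variables standing in for the device registers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit device registers backing a 64-bit free-running counter. */
static volatile uint32_t timer_h_reg = 0x1;
static volatile uint32_t timer_l_reg = 0xfffffff0;

/* Read high, low, then high again; if the high word changed, the low
 * word wrapped between the reads and is read once more.  The second
 * high read is the one paired with the final low value.
 */
static uint64_t read_split_counter(void)
{
	uint32_t h  = timer_h_reg;
	uint32_t l  = timer_l_reg;
	uint32_t h1 = timer_h_reg;

	if (h != h1)            /* wrap-around between the reads */
		l = timer_l_reg;

	return (uint64_t)l | (uint64_t)h1 << 32;
}

int main(void)
{
	printf("counter = 0x%" PRIx64 "\n", read_split_counter());
	return 0;
}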
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index e0a8fbdd1446..d4a99c9757cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -106,7 +106,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
-cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd97997..0af3338bfcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
/* pci_eqe_cmd_status
* Command completion event - status
*/
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
#endif
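The pci_hw.h hunk corrects the byte offsets of the command-completion EQE fields; each MLXSW_ITEM32(..., offset, shift, bits) line describes a field as a big-endian 32-bit word at a byte offset plus a bit position and width. A standalone extractor built from that same triple (the function is illustrative, not the mlxsw macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Pull 'bits' bits starting at 'shift' out of the big-endian 32-bit
 * word located 'offset' bytes into the event queue element, i.e. the
 * (offset, shift, width) triple that MLXSW_ITEM32() encodes.
 */
static uint32_t eqe_get_field(const uint8_t *eqe, size_t offset,
			      unsigned int shift, unsigned int bits)
{
	uint32_t host = ((uint32_t)eqe[offset] << 24) |
			((uint32_t)eqe[offset + 1] << 16) |
			((uint32_t)eqe[offset + 2] << 8) |
			 (uint32_t)eqe[offset + 3];
	uint32_t mask = (bits >= 32) ? 0xffffffffu : ((1u << bits) - 1);

	return (host >> shift) & mask;
}

int main(void)
{
	/* First dword of a fake EQE, big endian: token 0x1234, status 0x07. */
	uint8_t eqe[16] = { 0x12, 0x34, 0x00, 0x07 };

	printf("cmd_token  = 0x%04x\n",
	       (unsigned int)eqe_get_field(eqe, 0x00, 16, 16));
	printf("cmd_status = 0x%02x\n",
	       (unsigned int)eqe_get_field(eqe, 0x00, 0, 8));
	return 0;
}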
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index fece974b4edd..003093abb170 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
if (eth_skb_pad(skb)) {
@@ -2404,7 +2405,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
local_port);
return err;
}
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, false,
+ err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
module, width, lane);
if (err)
goto err_port_create;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 53126bf68ea9..01d0efa9c5c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -942,7 +942,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
char rauht_pl[MLXSW_REG_RAUHT_LEN];
struct net_device *dev;
bool entry_connected;
- u8 nud_state;
+ u8 nud_state, dead;
bool updating;
bool removing;
bool adding;
@@ -953,10 +953,11 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
dip = ntohl(*((__be32 *) n->primary_key));
memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
nud_state = n->nud_state;
+ dead = n->dead;
dev = n->dev;
read_unlock_bh(&n->lock);
- entry_connected = nud_state & NUD_VALID;
+ entry_connected = nud_state & NUD_VALID && !dead;
adding = (!neigh_entry->offloaded) && entry_connected;
updating = neigh_entry->offloaded && entry_connected;
removing = neigh_entry->offloaded && !entry_connected;
@@ -1351,7 +1352,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry;
struct net_device *dev = fib_nh->nh_dev;
struct neighbour *n;
- u8 nud_state;
+ u8 nud_state, dead;
/* Take a reference of neigh here ensuring that neigh would
* not be destroyed before the nexthop entry is finished.
@@ -1383,8 +1384,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
read_lock_bh(&n->lock);
nud_state = n->nud_state;
+ dead = n->dead;
read_unlock_bh(&n->lock);
- __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
return 0;
}
@@ -1394,6 +1396,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ __mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
/* If that is the last nexthop connected to that neigh, remove from
@@ -1452,6 +1455,8 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
nh = &nh_grp->nexthops[i];
mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
}
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nh_grp->adj_index_valid);
kfree(nh_grp);
}
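Both spectrum_router.c call sites now snapshot n->nud_state and n->dead under the same read_lock_bh(&n->lock) and only then decide whether the neighbour is usable, so a VALID-but-dead neighbour can no longer be offloaded. The pattern reduced to a plain rwlock; the pthread spelling and the NUD_VALID value are purely illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUD_VALID 0x1   /* stand-in value, not the kernel's mask */

struct neigh {
	pthread_rwlock_t lock;
	unsigned char nud_state;
	unsigned char dead;
};

/* Snapshot both fields under one lock and decide afterwards;
 * testing nud_state alone could accept a neighbour that is VALID
 * yet already marked dead, which is the race the hunks close.
 */
static bool neigh_connected(struct neigh *n)
{
	unsigned char nud_state, dead;

	pthread_rwlock_rdlock(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	pthread_rwlock_unlock(&n->lock);

	return (nud_state & NUD_VALID) && !dead;
}

int main(void)
{
	struct neigh n = { .nud_state = NUD_VALID, .dead = 1 };

	pthread_rwlock_init(&n.lock, NULL);
	printf("connected = %d\n", neigh_connected(&n));  /* 0 */
	pthread_rwlock_destroy(&n.lock);
	return 0;
}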
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 150ccf5192a9..2e88115e8735 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index f3bb9055a292..44bb04d4d21b 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -26,11 +26,11 @@ static inline bool is_bits_set(int value, int mask)
}
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
- int bank)
+ int bank)
{
int ret = 0;
-
int bank_opcode = BANK_SELECT(bank);
+
ret = spi_write(ctx->spi, &bank_opcode, 1);
if (ret == 0)
ctx->bank = bank;
@@ -39,7 +39,7 @@ static int encx24j600_switch_bank(struct encx24j600_context *ctx,
}
static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
- const void *buf, size_t len)
+ const void *buf, size_t len)
{
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
@@ -54,12 +54,14 @@ static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
static void regmap_lock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_lock(&ctx->mutex);
}
static void regmap_unlock_mutex(void *context)
{
struct encx24j600_context *ctx = context;
+
mutex_unlock(&ctx->mutex);
}
@@ -128,6 +130,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (reg < 0x80) {
int ret = 0;
+
cmd = banked_code | banked_reg;
if ((banked_reg < 0x16) && (ctx->bank != bank))
ret = encx24j600_switch_bank(ctx, bank);
@@ -174,6 +177,7 @@ static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
size_t len)
{
struct encx24j600_context *ctx = context;
+
return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
}
@@ -228,9 +232,9 @@ int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
if (reg < 0xc0)
return encx24j600_cmdn(ctx, reg, data, count);
- else
- /* SPI 1-byte command. Ignore data */
- return spi_write(ctx->spi, &reg, 1);
+
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
}
EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
@@ -495,6 +499,7 @@ static struct regmap_config phycfg = {
.writeable_reg = encx24j600_phymap_writeable,
.volatile_reg = encx24j600_phymap_volatile,
};
+
static struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b14f0305aa31..fbce6166504e 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -30,7 +30,7 @@
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
-module_param(debug, int, 0);
+module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* SRAM memory layout:
@@ -105,6 +105,7 @@ static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.regmap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
__func__, ret, reg);
@@ -115,6 +116,7 @@ static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -125,6 +127,7 @@ static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
{
struct net_device *dev = priv->ndev;
int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
__func__, ret, reg, val, mask);
@@ -135,6 +138,7 @@ static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
struct net_device *dev = priv->ndev;
unsigned int val = 0;
int ret = regmap_read(priv->ctx.phymap, reg, &val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
__func__, ret, reg);
@@ -145,6 +149,7 @@ static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.phymap, reg, val);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
__func__, ret, reg, val);
@@ -164,6 +169,7 @@ static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
struct net_device *dev = priv->ndev;
int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+
if (unlikely(ret))
netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
__func__, ret, cmd);
@@ -173,6 +179,7 @@ static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -184,6 +191,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
const u8 *data, size_t count)
{
int ret;
+
mutex_lock(&priv->ctx.mutex);
ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
mutex_unlock(&priv->ctx.mutex);
@@ -194,6 +202,7 @@ static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+
if (priv->autoneg == AUTONEG_ENABLE) {
phcon1 |= ANEN | RENEG;
} else {
@@ -328,6 +337,7 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
{
struct net_device *dev = priv->ndev;
struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+
if (!skb) {
pr_err_ratelimited("RX: OOM: packet dropped\n");
dev->stats.rx_dropped++;
@@ -346,7 +356,6 @@ static int encx24j600_receive_packet(struct encx24j600_priv *priv,
/* Maintain stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += rsv->len;
- priv->next_packet = rsv->next_packet;
netif_rx(skb);
@@ -383,6 +392,8 @@ static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
encx24j600_receive_packet(priv, &rsv);
}
+ priv->next_packet = rsv.next_packet;
+
newrxtail = priv->next_packet - 2;
if (newrxtail == ENC_RX_BUF_START)
newrxtail = SRAM_SIZE - 2;
@@ -827,6 +838,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
{
struct net_device *dev = priv->ndev;
+
netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
priv->tx_skb->len);
@@ -894,7 +906,6 @@ static void encx24j600_tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
netif_wake_queue(dev);
- return;
}
static int encx24j600_get_regs_len(struct net_device *dev)
@@ -957,12 +968,14 @@ static int encx24j600_set_settings(struct net_device *dev,
static u32 encx24j600_get_msglevel(struct net_device *dev)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
{
struct encx24j600_priv *priv = netdev_priv(dev);
+
priv->msg_enable = val;
}
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 22b0821c1da0..90eac63f9606 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -51,7 +51,7 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRV_NAME "natsemi"
#define DRV_VERSION "2.1"
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 93c4bdc0cdca..f9d2eb9a920a 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -119,7 +119,7 @@
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRV_NAME "ns83820"
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 2d04679a923a..baff744b560e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -160,7 +160,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <linux/delay.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 2a2ca5fa0c69..fa7770da6ef8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -100,7 +100,7 @@ static int gx_fix;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index bb74e1c10ffe..c68dbf7092b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master)
case 9: return "DBU";
case 10: return "DMAE";
default:
- return "Unkown";
+ return "Unknown";
}
}
@@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS),
(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 00efb1c4c57e..17a70122df05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1265,7 +1265,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.get_stats = &qed_iscsi_stats,
};
-const struct qed_iscsi_ops *qed_get_iscsi_ops()
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
{
return &qed_iscsi_ops_pass;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index d0a58282f2a8..a39ef2e7a9a6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
- DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ DP_NOTICE(p_hwfn, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index 9867f960b063..49272716a7c4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -191,8 +191,8 @@ int qede_roce_register_driver(struct qedr_driver *drv)
}
mutex_unlock(&qedr_dev_list_lock);
- DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
- qedr_counter);
+ pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+ qedr_counter);
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df28b96..2851b4c56570 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
else
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+ /* of_phy_find_device() claims a reference to the phydev,
+ * so we do that here manually as well. When the driver
+ * later unloads, it can unilaterally drop the reference
+ * without worrying about ACPI vs DT.
+ */
+ if (adpt->phydev)
+ get_device(&adpt->phydev->mdio.dev);
} else {
struct device_node *phy_np;
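The emac-phy.c hunk takes an explicit reference on the PHY device in the ACPI branch (of_phy_find_device() already hands back a referenced device in the DT branch), which lets the emac.c teardown paths drop the reference unconditionally. A toy refcount sketch of balancing get/put across two acquisition paths; the types and helper names are invented for the example:

#include <stdio.h>

struct dev { int refcnt; };

static struct dev phy_dev;

static void get_dev(struct dev *d) { d->refcnt++; }
static void put_dev(struct dev *d) { d->refcnt--; }

/* One lookup path hands back an already-referenced device, the other
 * did not; taking the extra reference in the second path lets the
 * common teardown call put_dev() once, unconditionally.
 */
static struct dev *attach_via_dt(void)
{
	get_dev(&phy_dev);   /* the lookup helper references the device */
	return &phy_dev;
}

static struct dev *attach_via_acpi(void)
{
	get_dev(&phy_dev);   /* reference taken explicitly, as in the fix */
	return &phy_dev;
}

int main(void)
{
	struct dev *a = attach_via_dt();
	put_dev(a);

	struct dev *b = attach_via_acpi();
	put_dev(b);

	printf("refcnt = %d\n", phy_dev.refcnt);   /* balanced: 0 */
	return 0;
}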
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index ae32f855e31b..f46d300bd585 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -460,6 +460,12 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
{
int ret;
+ /* On ACPI platforms, clocks are controlled by firmware and/or
+ * ACPI, not by drivers.
+ */
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = emac_clks_get(pdev, adpt);
if (ret)
return ret;
@@ -485,6 +491,9 @@ static int emac_clks_phase2_init(struct platform_device *pdev,
{
int ret;
+ if (has_acpi_companion(&pdev->dev))
+ return 0;
+
ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
if (ret)
return ret;
@@ -710,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@@ -731,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4ff4e0491406..aa11b70b9ca4 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -472,8 +472,6 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -481,12 +479,12 @@ static int r6040_close(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
- spin_lock_irq(&lp->lock);
+ phy_stop(dev->phydev);
napi_disable(&lp->napi);
netif_stop_queue(dev);
- r6040_down(dev);
- free_irq(dev->irq, dev);
+ spin_lock_irq(&lp->lock);
+ r6040_down(dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
@@ -496,6 +494,8 @@ static int r6040_close(struct net_device *dev)
spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+
/* Free Descriptor memory */
if (lp->rx_ring) {
pci_free_consistent(pdev,
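r6040_close() now stops the PHY and NAPI first, keeps only register writes and buffer teardown under the spinlock, and calls free_irq() after the lock is dropped, since free_irq() may sleep waiting for a running handler. A user-space analogue of that ordering with pthreads; joining the worker plays the role of free_irq() here:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int stopped;

static void *handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);      /* the "handler" also takes the lock */
	stopped = 1;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, handler, NULL);

	pthread_mutex_lock(&lock);
	/* ... tear down shared state under the lock ... */
	pthread_mutex_unlock(&lock);

	/* Wait for the handler only after the lock is released; waiting
	 * while still holding it can deadlock, which is the shape of
	 * calling a sleeping free_irq() under a spinlock in the driver.
	 */
	pthread_join(t, NULL);
	printf("stopped = %d\n", stopped);
	return 0;
}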
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b7c89ebcf4a2..0b3cd58093d5 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -76,7 +76,7 @@
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f9b97f5946f8..8f1623bf2134 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -326,6 +326,7 @@ enum cfg_version {
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
@@ -695,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
enum rtl_rx_desc_bit {
/* Rx private */
PID1 = (1 << 18), /* Protocol ID bit 1/2 */
- PID0 = (1 << 17), /* Protocol ID bit 2/2 */
+ PID0 = (1 << 17), /* Protocol ID bit 0/2 */
#define RxProtoUDP (PID1)
#define RxProtoTCP (PID0)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692c840d..89ac1e3f6175 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
- }
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
- }
out:
return budget - quota;
}
@@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ /* Zero length DMA descriptors are problematic as they seem to
+ * terminate DMA transfers. Avoid them by simply using a length of
+ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+ * data by the call to skb_put_padto() above this is safe with
+ * respect to both the length of the first DMA descriptor (len)
+ * overflowing the available data and the length of the second DMA
+ * descriptor (skb->len - len) being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
+
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
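The ravb hunk computes the first TX segment length as the distance from skb->data to the next DPTR_ALIGN boundary, which is zero when the data is already aligned; a zero-length descriptor can stall the DMA, so the driver substitutes DPTR_ALIGN. The alignment arithmetic checked in plain C (DPTR_ALIGN is 4 in the driver, the rest is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN 4  /* required descriptor pointer alignment */

/* Round a pointer value up to the next DPTR_ALIGN boundary,
 * as the kernel's PTR_ALIGN() does.
 */
static uintptr_t ptr_align(uintptr_t p)
{
	return (p + DPTR_ALIGN - 1) & ~(uintptr_t)(DPTR_ALIGN - 1);
}

int main(void)
{
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x1003 };

	for (unsigned int i = 0; i < 3; i++) {
		size_t len = ptr_align(addrs[i]) - addrs[i];

		/* Already-aligned data yields len == 0; the driver bumps
		 * that to DPTR_ALIGN so the first descriptor never has
		 * zero length.
		 */
		if (len == 0)
			len = DPTR_ALIGN;
		printf("data %#lx -> first segment %zu bytes\n",
		       (unsigned long)addrs[i], len);
	}
	return 0;
}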
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f341c1bc7001..f729a6b43958 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .hw_crc = 1,
.tsu = 1,
.select_mii = 1,
.shift_rd0 = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -819,6 +820,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.tsu = 1,
.hw_crc = 1,
.select_mii = 1,
+ .shift_rd0 = 1,
};
/* SH7763 */
@@ -831,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -1656,7 +1658,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
else
goto out;
- if (!likely(mdp->irq_enabled)) {
+ if (unlikely(!mdp->irq_enabled)) {
sh_eth_write(ndev, 0, EESIPR);
goto out;
}
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 46f7be85f5a3..2c032629c369 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,3 +1,20 @@
+#
+# Solarflare device configuration
+#
+
+config NET_VENDOR_SOLARFLARE
+ bool "Solarflare devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Solarflare devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOLARFLARE
+
config SFC
tristate "Solarflare SFC9000/SFC9100-family support"
depends on PCI
@@ -44,3 +61,7 @@ config SFC_MCDI_LOGGING
Driver-Interface) commands and responses, allowing debugging of
driver/firmware interaction. The tracing is actually enabled by
a sysfs file 'mcdi_logging' under the PCI device.
+
+source "drivers/net/ethernet/sfc/falcon/Kconfig"
+
+endif # NET_VENDOR_SOLARFLARE
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de2947ccc5ad..5eb0e684fd76 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
}
/* don't fail init if RSS setup doesn't work */
- efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ efx->rss_active = (rc == 0);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f644216eda1b..18ebaea44e82 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -120,44 +120,53 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
+ u32 supported;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_settings(efx, ecmd);
+ efx->phy_op->get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
if (LOOPBACK_INTERNAL(efx)) {
- ethtool_cmd_speed_set(ecmd, link_state->speed);
- ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.speed = link_state->speed;
+ cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/* This must be called with rtnl_lock held. */
-static int efx_ethtool_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *ecmd)
+static int
+efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
- if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
- (ecmd->duplex != DUPLEX_FULL)) {
+ if ((cmd->base.speed == SPEED_1000) &&
+ (cmd->base.duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_settings(efx, ecmd);
+ rc = efx->phy_op->set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -966,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
info->data = 0;
+ if (!efx->rss_active) /* No RSS */
+ return 0;
switch (info->flow_type) {
case UDP_V4_FLOW:
if (efx->rx_hash_udp_4tuple)
@@ -1342,8 +1353,6 @@ static int efx_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
- .get_settings = efx_ethtool_get_settings,
- .set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
@@ -1373,4 +1382,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
+ .get_link_ksettings = efx_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9dcd396784ae..c905971c5f3a 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -503,45 +503,59 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
kfree(phy_data);
}
-static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
-
- ecmd->supported =
- mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- ecmd->advertising = efx->link_advertising;
- ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
- ecmd->duplex = efx->link_state.fd;
- ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
- ecmd->phy_address = phy_cfg->port;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
- ecmd->mdio_support = (efx->mdio.mode_support &
+ u32 supported, advertising, lp_advertising;
+
+ supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+ advertising = efx->link_advertising;
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- ecmd->lp_advertising =
+ lp_advertising =
mcdi_to_ethtool_cap(phy_cfg->media,
MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
}
-static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int
+efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg) {
- caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+ if (cmd->base.autoneg) {
+ caps = (ethtool_to_mcdi_cap(advertising) |
1 << MC_CMD_PHY_CAP_AN_LBN);
- } else if (ecmd->duplex) {
- switch (ethtool_cmd_speed(ecmd)) {
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -550,7 +564,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
default: return -EINVAL;
}
} else {
- switch (ethtool_cmd_speed(ecmd)) {
+ switch (cmd->base.speed) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -563,9 +577,9 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
if (rc)
return rc;
- if (ecmd->autoneg) {
+ if (cmd->base.autoneg) {
efx_link_set_advertising(
- efx, ecmd->advertising | ADVERTISED_Autoneg);
+ efx, advertising | ADVERTISED_Autoneg);
phy_cfg->forced_cap = 0;
} else {
efx_link_set_advertising(efx, 0);
@@ -812,8 +826,8 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = {
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
- .get_settings = efx_mcdi_phy_get_settings,
- .set_settings = efx_mcdi_phy_set_settings,
+ .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
+ .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
.test_alive = efx_mcdi_phy_test_alive,
.run_tests = efx_mcdi_phy_run_tests,
.test_name = efx_mcdi_phy_test_name,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8692e829b40f..1c62c1a00fca 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -720,8 +720,8 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
* @reconfigure: Reconfigure PHY (e.g. for new link parameters)
* @poll: Update @link_state and report whether it changed.
* Serialised by the mac_lock.
- * @get_settings: Get ethtool settings. Serialised by the mac_lock.
- * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @test_alive: Test that PHY is 'alive' (online)
@@ -736,10 +736,10 @@ struct efx_phy_operations {
void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
- void (*get_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
- int (*set_settings) (struct efx_nic *efx,
- struct ethtool_cmd *ecmd);
+ void (*get_link_ksettings)(struct efx_nic *efx,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_link_ksettings)(struct efx_nic *efx,
+ const struct ethtool_link_ksettings *cmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
@@ -860,6 +860,7 @@ struct vfdi_status;
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
+ bool rss_active;
bool rx_hash_udp_4tuple;
unsigned int_error_count;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a3901bc96586..4e54e5dc9fcb 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+ efx->rss_active = true;
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 42051ab98cf0..d390b9663dc3 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -60,7 +60,7 @@
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 39fca6c0b68d..19a458716f1a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -74,7 +74,7 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h> /* User space memory access functions */
+#include <linux/uaccess.h> /* User space memory access functions */
#include "sis900.h"
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index fe9760ffab51..55a95e1d69d6 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -86,7 +86,7 @@ static int rx_copybreak;
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index f1c75e291e55..67154621abcf 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -52,7 +52,7 @@
#include <pcmcia/ss.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*====================================================================*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index c35597586121..3dc7d279f805 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
struct regmap *regmap;
};
-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
return 0;
}
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int oxnas_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct device_node *sysctrl;
struct oxnas_dwmac *dwmac;
int ret;
- sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
- if (!sysctrl) {
- dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
- return -EINVAL;
- }
-
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
@@ -128,72 +129,48 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
+ plat_dat->init = oxnas_dwmac_init;
+ plat_dat->exit = oxnas_dwmac_exit;
- dwmac->regmap = syscon_node_to_regmap(sysctrl);
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "oxsemi,sys-ctrl");
if (IS_ERR(dwmac->regmap)) {
dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
- return PTR_ERR(dwmac->regmap);
+ ret = PTR_ERR(dwmac->regmap);
+ goto err_remove_config_dt;
}
dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
- if (IS_ERR(dwmac->clk))
- return PTR_ERR(dwmac->clk);
+ if (IS_ERR(dwmac->clk)) {
+ ret = PTR_ERR(dwmac->clk);
+ goto err_remove_config_dt;
+ }
- ret = oxnas_dwmac_init(dwmac);
+ ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- clk_disable_unprepare(dwmac->clk);
+ goto err_dwmac_exit;
- return ret;
-}
-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret;
-
- ret = oxnas_dwmac_init(dwmac);
- if (ret)
- return ret;
+ return 0;
- ret = stmmac_resume(dev);
+err_dwmac_exit:
+ oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
- oxnas_dwmac_suspend, oxnas_dwmac_resume);
static const struct of_device_id oxnas_dwmac_match[] = {
{ .compatible = "oxsemi,ox820-dwmac" },
@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
static struct platform_driver oxnas_dwmac_driver = {
.probe = oxnas_dwmac_probe,
- .remove = oxnas_dwmac_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "oxnas-dwmac",
- .pm = &oxnas_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = oxnas_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 77ab0a85f067..fa6e9704c077 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
int ret;
struct device *dev = &bsp_priv->pdev->dev;
+ ret = gmac_clk_enable(bsp_priv, true);
+ if (ret)
+ return ret;
+
/*rmii or rgmii*/
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
dev_info(dev, "init for RGMII\n");
@@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
if (ret)
return ret;
- ret = gmac_clk_enable(bsp_priv, true);
- if (ret)
- return ret;
-
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b21d03fe4f43..be3c91c7f211 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,7 +539,7 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->mii.reg_shift = 6;
mac->mii.reg_mask = 0x000007C0;
mac->mii.clk_csr_shift = 2;
- mac->mii.clk_csr_mask = 0xF;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
/* Get and dump the chip ID */
*synopsys_id = stmmac_get_synopsys_id(hwid);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a1d582f47b1a..9dd2987e284d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -197,7 +197,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->mii.reg_shift = 6;
mac->mii.reg_mask = 0x000007C0;
mac->mii.clk_csr_shift = 2;
- mac->mii.clk_csr_mask = 0xF;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
/* Synopsys Id is not available on old chips */
*synopsys_id = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a340fc8bd0de..8816515e1bbb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des3 = cpu_to_le32(tdes3);
}
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des3 = cpu_to_le32(tdes3);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ce97e522566a..f0d86321dfe2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
* descriptors for the same frame has to be set before, to
* avoid race condition.
*/
- wmb();
+ dma_wmb();
p->des0 = cpu_to_le32(tdes0);
}
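The wmb()/smp_wmb() to dma_wmb() conversions in the descriptor code keep the required ordering, all descriptor words visible before the word that hands the descriptor to the DMA engine, while using the lighter barrier that only orders writes to coherent memory. A user-space analogue of the publish pattern using a C11 release fence; the struct layout and OWN bit are illustrative, not the dwmac descriptor format:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor; bit 31 of 'status' hands it to the consumer
 * (the OWN bit for the DMA engine in the driver).
 */
struct desc {
	uint32_t buf;
	uint32_t len;
	_Atomic uint32_t status;
};

#define DESC_OWN (1u << 31)

static void publish(struct desc *d, uint32_t buf, uint32_t len)
{
	d->buf = buf;
	d->len = len;

	/* Make the payload writes visible before ownership changes hands;
	 * dma_wmb() plays this role against a device reading coherent memory.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&d->status, DESC_OWN | len, memory_order_relaxed);
}

int main(void)
{
	struct desc d = { 0 };

	publish(&d, 0x1000, 64);
	printf("status = 0x%08x\n", (unsigned int)atomic_load(&d.status));
	return 0;
}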
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3e405785b81c..e3f6389e1b01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- smp_wmb();
+ dma_wmb();
}
netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
}
- wmb();
+ dma_wmb();
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
else
priv->hw->desc->set_rx_owner(p);
- wmb();
+ dma_wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
@@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device,
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
- if (priv->plat->maxmtu < ndev->max_mtu)
+ /* Do not overwrite ndev->max_mtu when plat->maxmtu > ndev->max_mtu,
+ * nor when plat->maxmtu < ndev->min_mtu, which is an invalid range.
+ */
+ if ((priv->plat->maxmtu < ndev->max_mtu) &&
+ (priv->plat->maxmtu >= ndev->min_mtu))
ndev->max_mtu = priv->plat->maxmtu;
+ else if (priv->plat->maxmtu < ndev->min_mtu)
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3332,20 +3340,14 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
- netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
- ret = register_netdev(ndev);
- if (ret) {
- netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
- __func__, ret);
- goto error_netdev_register;
- }
-
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -3365,18 +3367,28 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- netdev_err(priv->dev,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
- return 0;
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
+ goto error_netdev_register;
+ }
+
+ return ret;
-error_mdio_register:
- unregister_netdev(ndev);
error_netdev_register:
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 23322fd9e3ac..b0344c213752 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -81,8 +81,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr & priv->hw->mii.clk_csr_mask)
- << priv->hw->mii.clk_csr_shift;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4)
value |= MII_GMAC4_READ;
@@ -116,16 +116,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 value = MII_WRITE | MII_BUSY;
+ u32 value = MII_BUSY;
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= ((priv->clk_csr & priv->hw->mii.clk_csr_mask)
- << priv->hw->mii.clk_csr_shift);
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4)
value |= MII_GMAC4_WRITE;
+ else
+ value |= MII_WRITE;
/* Wait until any existing MII operation is complete */
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
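
The clk_csr fix above applies the mask after the shift, which matters if the mask is expressed in register bit positions (as the new ordering implies): masking the unshifted value with a positioned mask would simply zero it. A small illustration of the two orderings, assuming a hypothetical 4-bit field at register bits 5..2:

/* Hypothetical 4-bit field occupying register bits 5..2. */
#define EX_FIELD_SHIFT	2
#define EX_FIELD_MASK	GENMASK(5, 2)	/* mask in register bit positions */

static inline u32 ex_encode(u32 reg, u32 val)
{
	/* Correct: shift into place, then clamp with the positioned mask. */
	return reg | ((val << EX_FIELD_SHIFT) & EX_FIELD_MASK);
	/* Wrong with a positioned mask:
	 * (val & EX_FIELD_MASK) << EX_FIELD_SHIFT clears the value before
	 * it ever reaches the field.
	 */
}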
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index a2831773431a..3da4737620cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
}
static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index e9e5ef241c6f..0e8e89f17dbb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -99,7 +99,7 @@
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x))
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 66ecf0fcc330..d277e4107976 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -42,7 +42,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/irq.h>
#ifdef CONFIG_SPARC
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ca96408058b0..72ff05cd3ed8 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -49,7 +49,7 @@
#include <asm/prom.h>
#include <asm/auxio.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index f4307654e4ae..4a8d5b18dfd5 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -302,7 +302,7 @@
* Always write the address first before setting the ownership
* bits to avoid races with the hardware scanning the ring.
*/
-typedef u32 __bitwise__ hme32;
+typedef u32 __bitwise hme32;
struct happy_meal_rxd {
hme32 rx_flags;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fcf2b86..9b8a30bf939b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
goto fail_alloc;
}
-#warning FIXME: unhardcode gpio&reset bits
+ /* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 0c0d48e5bea4..32279d21c836 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -121,7 +121,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
return type == match ? 0 : -1;
}
-static cycle_t cpts_systim_read(const struct cyclecounter *cc)
+static u64 cpts_systim_read(const struct cyclecounter *cc)
{
u64 val = 0;
struct cpts_event *event;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index c7e547e4f2b1..7d9e36f66735 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -94,6 +94,7 @@
/* offset relative to base of XGBE_SS_REG_INDEX */
#define XGBE10_SGMII_MODULE_OFFSET 0x100
+#define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
/* offset relative to base of XGBE_SM_REG_INDEX */
#define XGBE10_HOST_PORT_OFFSET 0x34
#define XGBE10_SLAVE_PORT_OFFSET 0x64
@@ -1746,6 +1747,17 @@ static void keystone_set_msglevel(struct net_device *ndev, u32 value)
netcp->msg_enable = value;
}
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+ return gbe_intf;
+}
+
static void keystone_get_stat_strings(struct net_device *ndev,
uint32_t stringset, uint8_t *data)
{
@@ -1754,7 +1766,7 @@ static void keystone_get_stat_strings(struct net_device *ndev,
struct gbe_priv *gbe_dev;
int i;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
gbe_dev = gbe_intf->gbe_dev;
@@ -1778,7 +1790,7 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
gbe_dev = gbe_intf->gbe_dev;
@@ -1896,7 +1908,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
struct gbe_intf *gbe_intf;
struct gbe_priv *gbe_dev;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return;
@@ -1920,7 +1932,7 @@ static int keystone_get_link_ksettings(struct net_device *ndev,
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
@@ -1953,7 +1965,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev,
if (!phy)
return -EINVAL;
- gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ gbe_intf = keystone_get_intf_data(netcp);
if (!gbe_intf)
return -EINVAL;
@@ -2311,7 +2323,7 @@ static void gbe_init_host_port(struct gbe_priv *priv)
int bypass_en = 1;
/* Host Tx Pri */
- if (IS_SS_ID_NU(priv))
+ if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
writel(HOST_TX_PRI_MAP_DEFAULT,
GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0aaf975bb347..2255f9a6f3bc 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -751,7 +751,7 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
&info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_start(&tx_wake->timer,
- ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+ TX_TIMER_DELAY_USEC * 1000UL,
HRTIMER_MODE_REL_PINNED);
}
@@ -770,7 +770,7 @@ static void tile_net_schedule_egress_timer(void)
if (!info->egress_timer_scheduled) {
hrtimer_start(&info->egress_timer,
- ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+ EGRESS_TIMER_DELAY_USEC * 1000UL,
HRTIMER_MODE_REL_PINNED);
info->egress_timer_scheduled = true;
}
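
Dropping the ktime_set(0, ...) wrapper works because ktime_t is a plain s64 nanosecond count, so an expression already in nanoseconds is itself a valid ktime_t for a relative timer. A minimal sketch of arming a relative hrtimer this way (hypothetical timer and delay):

/* Sketch: arm a relative hrtimer with a delay given in microseconds. */
static void ex_arm_timer(struct hrtimer *t, unsigned long delay_usec)
{
	hrtimer_start(t, delay_usec * NSEC_PER_USEC, HRTIMER_MODE_REL);
}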
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ba5c54249055..0a6c4e804eed 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -114,7 +114,7 @@ static const int multicast_filter_limit = 32;
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/dmi.h>
/* These identify the driver base version and may not be removed. */
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 3b08ec766076..f71883264cc0 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -88,7 +88,7 @@
#include <pcmcia/ciscode.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifndef MANFID_COMPAQ
#define MANFID_COMPAQ 0x0138
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index e26398b5a7dc..d0a68bdd5f63 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
r = queue->rx_curr_get ;
while (queue->rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
+ DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ;
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
frag_count = 1 ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
@@ -1645,7 +1645,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
if (frame_status & LAN_TX) {
/* '*t' is already defined */
- DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
+ DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ;
t->txd_virt = virt ;
t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
t->txd_tbadr = cpu_to_le32(phys) ;
@@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
__le32 tbctrl;
NDD_TRACE("THSB",mb,fc,0) ;
- DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
+ DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ;
mb->sm_off-- ; /* set to fc */
mb->sm_len++ ; /* + fc */
@@ -1960,7 +1960,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
do {
DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
+ DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ;
tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
if (tbctrl & BMU_OWN || !queue->tx_used){
@@ -1988,7 +1988,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
}
else {
#ifndef PASS_1ST_TXD_2_TX_COMP
- DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
+ DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ;
mac_drv_tx_complete(smc,t2) ;
#else
DB_TX("mac_drv_tx_comp for TxD 0x%x",
@@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
tx_used = queue->tx_used ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
- DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
+ DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ;
t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 441b4dc79450..52fa162a31e0 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
SMbuf *reply ;
sm = smtod(mb,struct smt_header *) ;
- DB_SMT("SMT: processing PMF frame at %x len %d\n",sm,mb->sm_len) ;
+ DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ;
#ifdef DEBUG
dump_smt(smc,sm,"PMF Received") ;
#endif
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 3a639180e4a0..2414f1dc8ddd 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -88,7 +88,7 @@ static const char * const boot_msg =
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "h/types.h"
#undef ADDR // undo Linux definition
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index cd78b7cacc75..e80a08903fcf 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -504,7 +504,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#endif
smt_swap_para(sm,(int) mb->sm_len,1) ;
- DB_SMT("SMT : received packet [%s] at 0x%x\n",
+ DB_SMT("SMT : received packet [%s] at 0x%p\n",
smt_type_name[m_fc(mb) & 0xf],sm) ;
DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 98f10c216521..8b6810bad54b 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -158,9 +158,9 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
return false;
- iph = (struct iphdr *)(skb->data + hdrlen + sizeof(struct iphdr));
+ iph = (struct iphdr *)(skb->data + hdrlen);
- return iph->saddr != pctx->ms_addr_ip4.s_addr;
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP source address in this packet is assigned to any
@@ -423,11 +423,11 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
/* Bits 8 7 6 5 4 3 2 1
* +--+--+--+--+--+--+--+--+
- * |version |PT| 1| E| S|PN|
+ * |version |PT| 0| E| S|PN|
* +--+--+--+--+--+--+--+--+
* 0 0 1 1 1 0 0 0
*/
- gtp1->flags = 0x38; /* v1, GTP-non-prime. */
+ gtp1->flags = 0x30; /* v1, GTP-non-prime. */
gtp1->type = GTP_TPDU;
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
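
The gtp_check_src_ms_ipv4() correction reads the inner IPv4 header directly after the GTP header (offset hdrlen, not hdrlen + sizeof(struct iphdr)) and returns true when the inner source address matches the expected mobile-station address. A hedged sketch of that check, with names local to the example:

/* Sketch: does the inner IPv4 source match the expected mobile address? */
static bool ex_inner_saddr_matches(struct sk_buff *skb, unsigned int hdrlen,
				   __be32 expected_saddr)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);	/* inner header starts at hdrlen */
	return iph->saddr == expected_saddr;
}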
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 470b3dcd54e5..922bf440e9f1 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -13,7 +13,7 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/mm.h>
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 78dbc44540f6..7d054697b199 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -55,7 +55,7 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <net/ax25.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 072cddce9264..809dc25909d1 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -86,7 +86,7 @@
#include <linux/bitops.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 7b916d5b14b9..ebc06822fd4d 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -82,7 +82,7 @@
#include <linux/jiffies.h>
#include <linux/time64.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index f9a8976195ba..60fcf512c208 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -67,7 +67,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 622ab3ab9e93..f62e7f325cf9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -69,7 +69,7 @@
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e4137c1b3df9..2479072981a1 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -40,7 +40,7 @@
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 4bad0b894e9c..8c3633c1d078 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -58,7 +58,7 @@
#include <linux/hdlcdrv.h>
#include <linux/random.h>
#include <net/ax25.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crc-ccitt.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 1dfe2304daa7..ece59c54a653 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -17,7 +17,7 @@
*/
#include <linux/module.h>
#include <linux/bitops.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <linux/string.h>
#include <linux/mm.h>
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index b8083161ef46..6754cd01c605 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -178,7 +178,7 @@
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "z8530.h"
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index aaff07c10058..b6891ada1d7b 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -68,7 +68,7 @@
#include <linux/seq_file.h>
#include <net/net_namespace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/yam.h>
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index f5a9728b89f3..dd7fc6659ad4 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -46,7 +46,7 @@
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define rr_if_busy(dev) netif_queue_stopped(dev)
#define rr_if_running(dev) netif_running(dev)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9414c054852..fcab8019dda0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* policy filters on the host). Deliver these via the VF
* interface in the guest.
*/
+ rcu_read_lock();
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
+ rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* TODO - use NAPI?
*/
netif_rx(skb);
+ rcu_read_unlock();
return 0;
}
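
The netvsc change brackets the rcu_dereference() of the VF pointer with rcu_read_lock()/rcu_read_unlock(), since a dereferenced RCU pointer is only guaranteed valid inside the read-side critical section and every exit path must drop the lock. A generic sketch of the pattern, with hypothetical names:

/* Sketch: use an RCU-protected pointer only inside the read-side section. */
struct ex_ctx {
	struct net_device __rcu *alt_dev;
};

static int ex_deliver(struct ex_ctx *ctx, struct net_device *def_dev,
		      struct sk_buff *skb)
{
	struct net_device *dev;
	int ret;

	rcu_read_lock();
	dev = rcu_dereference(ctx->alt_dev);
	skb->dev = (dev && (dev->flags & IFF_UP)) ? dev : def_dev;
	ret = netif_rx(skb);	/* still under the read-side lock */
	rcu_read_unlock();

	return ret;
}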
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 057025722e3d..76ba7ecfe142 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -510,7 +510,7 @@ at86rf230_async_state_delay(void *context)
case STATE_TRX_OFF:
switch (ctx->to_state) {
case STATE_RX_AACK_ON:
- tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
+ tim = c->t_off_to_aack * NSEC_PER_USEC;
/* state change from TRX_OFF to RX_AACK_ON to do a
* calibration, we need to reset the timeout for the
* next one.
@@ -519,7 +519,7 @@ at86rf230_async_state_delay(void *context)
goto change;
case STATE_TX_ARET_ON:
case STATE_TX_ON:
- tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+ tim = c->t_off_to_tx_on * NSEC_PER_USEC;
/* state change from TRX_OFF to TX_ON or ARET_ON to do
* a calibration, we need to reset the timeout for the
* next one.
@@ -539,8 +539,7 @@ at86rf230_async_state_delay(void *context)
* to TX_ON or TRX_OFF.
*/
if (!force) {
- tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
- NSEC_PER_USEC);
+ tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC;
goto change;
}
break;
@@ -552,7 +551,7 @@ at86rf230_async_state_delay(void *context)
case STATE_P_ON:
switch (ctx->to_state) {
case STATE_TRX_OFF:
- tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
+ tim = c->t_reset_to_off * NSEC_PER_USEC;
goto change;
default:
break;
@@ -1716,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
/* Reset */
if (gpio_is_valid(rstn)) {
udelay(1);
- gpio_set_value(rstn, 0);
+ gpio_set_value_cansleep(rstn, 0);
udelay(1);
- gpio_set_value(rstn, 1);
+ gpio_set_value_cansleep(rstn, 1);
usleep_range(120, 240);
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f864737a..ef688518ad77 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
+ uint8_t *buffer;
uint8_t value;
+ buffer = kmalloc(1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, &value, 1, 1000);
- return ret >= 0 ? value : ret;
+ 0, reg, buffer, 1, 1000);
+
+ if (ret >= 0) {
+ value = buffer[0];
+ kfree(buffer);
+ return value;
+ } else {
+ kfree(buffer);
+ return ret;
+ }
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
- struct device *dev = &atusb->usb_dev->dev;
-
- if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
- dev_info(dev, "Automatic frame retransmission is only available from "
- "firmware version 0.3. Please update if you want this feature.");
- return -EINVAL;
- }
return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[3];
+ unsigned char *buffer;
int ret;
+ buffer = kmalloc(3, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
+ kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- char build[ATUSB_BUILD_SIZE + 1];
+ char *build;
int ret;
+ build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
+ if (!build)
+ return -ENOMEM;
+
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
+ kfree(build);
return ret;
}
@@ -698,7 +714,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+ unsigned char *buffer;
__le64 extended_addr;
u64 addr;
int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
+ buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Firmware is new enough so we fetch the address from EEPROM */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
- if (ret < 0)
- dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+ if (ret < 0) {
+ dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ kfree(buffer);
+ return ret;
+ }
memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
/* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
+ kfree(buffer);
return ret;
}
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
- IEEE802154_HW_FRAME_RETRIES;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
+ if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
+ hw->flags |= IEEE802154_HW_FRAME_RETRIES;
+
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,
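
The atusb changes replace on-stack buffers with kmalloc()'d ones because buffers handed to USB control transfers are used for DMA, and DMA to stack memory is not permitted. A minimal sketch of the pattern with generic usb_control_msg() arguments (hypothetical request values):

/* Sketch: read one byte over a control transfer into a DMA-safe buffer. */
static int ex_read_byte(struct usb_device *udev, u8 request, u16 index)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR, 0, index,
			      buf, 1, 1000);
	if (ret >= 0)
		ret = buf[0];

	kfree(buf);
	return ret;
}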
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 031093e1c25f..dbfbb33ac66c 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -99,6 +99,11 @@ struct ipvl_port {
int count;
};
+struct ipvl_skb_cb {
+ bool tx_pkt;
+};
+#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
+
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
{
return rcu_dereference(d->rx_handler_data);
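
IPVL_SKB_CB() follows the common pattern of overlaying a small private struct on skb->cb[] so per-packet state (here, whether the packet came from the TX path) survives queueing without growing sk_buff; the struct must fit within the cb area. A generic sketch of the pattern, with hypothetical names:

/* Sketch: stash per-packet state in the skb control block. */
struct ex_skb_cb {
	bool from_tx;
};
#define EX_SKB_CB(skb)	((struct ex_skb_cb *)&((skb)->cb[0]))

static void ex_enqueue(struct sk_buff_head *q, struct sk_buff *skb, bool tx)
{
	BUILD_BUG_ON(sizeof(struct ex_skb_cb) > sizeof(skb->cb));
	EX_SKB_CB(skb)->from_tx = tx;
	skb_queue_tail(q, skb);
}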
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b4e990743e1d..83ce74acf82d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
unsigned int mac_hash;
int ret;
u8 pkt_type;
- bool hlocal, dlocal;
+ bool tx_pkt;
__skb_queue_head_init(&list);
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
spin_unlock_bh(&port->backlog.lock);
while ((skb = __skb_dequeue(&list)) != NULL) {
+ struct net_device *dev = skb->dev;
+ bool consumed = false;
+
ethh = eth_hdr(skb);
- hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+ tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
mac_hash = ipvlan_mac_hash(ethh->h_dest);
if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
else
pkt_type = PACKET_MULTICAST;
- dlocal = false;
rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
- if (hlocal && (ipvlan->dev == skb->dev)) {
- dlocal = true;
+ if (tx_pkt && (ipvlan->dev == skb->dev))
continue;
- }
if (!test_bit(mac_hash, ipvlan->mac_filters))
continue;
-
+ if (!(ipvlan->dev->flags & IFF_UP))
+ continue;
ret = NET_RX_DROP;
len = skb->len + ETH_HLEN;
nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- goto acct;
-
- nskb->pkt_type = pkt_type;
- nskb->dev = ipvlan->dev;
- if (hlocal)
- ret = dev_forward_skb(ipvlan->dev, nskb);
- else
- ret = netif_rx(nskb);
-acct:
+ local_bh_disable();
+ if (nskb) {
+ consumed = true;
+ nskb->pkt_type = pkt_type;
+ nskb->dev = ipvlan->dev;
+ if (tx_pkt)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
+ else
+ ret = netif_rx(nskb);
+ }
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ local_bh_enable();
}
rcu_read_unlock();
- if (dlocal) {
+ if (tx_pkt) {
/* If the packet originated here, send it out. */
skb->dev = port->dev;
skb->pkt_type = pkt_type;
dev_queue_xmit(skb);
} else {
- kfree_skb(skb);
+ if (consumed)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
}
+ if (dev)
+ dev_put(dev);
}
}
@@ -470,15 +477,24 @@ out:
}
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool tx_pkt)
{
if (skb->protocol == htons(ETH_P_PAUSE)) {
kfree_skb(skb);
return;
}
+ /* Record whether the deferred packet came from the TX or the RX path.
+ * Deciding this by looking at the MAC addresses in the packet would lead
+ * to erroneous decisions (e.g. with loopback mode on the master device,
+ * or hairpin mode on the switch).
+ */
+ IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
+
spin_lock(&port->backlog.lock);
if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+ if (skb->dev)
+ dev_hold(skb->dev);
__skb_queue_tail(&port->backlog, skb);
spin_unlock(&port->backlog.lock);
schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
} else if (is_multicast_ether_addr(eth->h_dest)) {
ipvlan_skb_crossing_ns(skb, NULL);
- ipvlan_multicast_enqueue(ipvlan->port, skb);
+ ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
}
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
*/
if (nskb) {
ipvlan_skb_crossing_ns(nskb, NULL);
- ipvlan_multicast_enqueue(port, nskb);
+ ipvlan_multicast_enqueue(port, nskb, false);
}
}
} else {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 693ec5b66222..8b0f99300cbc 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -135,6 +135,7 @@ err:
static void ipvlan_port_destroy(struct net_device *dev)
{
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+ struct sk_buff *skb;
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
if (port->mode == IPVLAN_MODE_L3S) {
@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
}
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
- __skb_queue_purge(&port->backlog);
+ while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
+ if (skb->dev)
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
kfree(port);
}
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 7a3f990c1935..7a20a9a4663a 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -31,7 +31,7 @@
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/mutex.h>
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index fb5d162ec7d2..24c0f169a7b1 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -71,7 +71,7 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 8e6e0edf2440..3affded3e30d 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -123,7 +123,7 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 37f23a189b35..741452c7ce35 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -87,7 +87,7 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index bca6a1e72d1d..6f6ed75b63c9 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -55,7 +55,7 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a0849f49bbec..ffedad2a360a 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
#include <linux/seq_file.h>
#include <linux/math64.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <net/irda/irda.h>
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f293d33fb28f..8d5b903d1d9d 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -517,9 +517,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
- if (mtt > 1000)
- mdelay(mtt / 1000);
- else if (mtt)
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 6255973e3dda..1e05b7c2d157 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -40,7 +40,7 @@
#include <linux/fcntl.h>
#include <linux/in.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/inet.h>
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 52a9d811be06..5c26653eceb5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -679,7 +679,6 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int depth;
bool zerocopy = false;
size_t linear;
- ssize_t n;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -690,8 +689,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
len -= vnet_hdr_len;
err = -EFAULT;
- n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
- if (n != sizeof(vnet_hdr))
+ if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
goto err;
iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
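
copy_from_iter_full() copies exactly the requested number of bytes or fails as a whole and reverts the iterator, so the caller no longer compares a partial byte count against sizeof(); a failed copy simply maps to -EFAULT. A minimal sketch of the pattern, with a hypothetical header struct:

/* Sketch: pull a fixed-size header out of an iov_iter, all or nothing. */
struct ex_hdr {
	__le16 flags;
	__le16 len;
};

static int ex_pull_hdr(struct ex_hdr *hdr, struct iov_iter *from)
{
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}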
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d361835b315d..8dbd59baa34d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -279,6 +279,7 @@ config MARVELL_PHY
config MESON_GXL_PHY
tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
---help---
Currently has a driver for the Amlogic Meson GXL Internal PHY
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 36e3e2033eca..e28913d9ea7e 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define MII_DM9161_SCR 0x10
#define MII_DM9161_SCR_INIT 0x0610
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1b639242f9e2..ca1b462bf7b2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CTRL 0x1f
+#define DP83867_CFG3 0x1e
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
micr_status |=
(MII_DP83867_MICR_AN_ERR_INT_EN |
MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
@@ -129,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",
@@ -214,6 +221,13 @@ static int dp83867_config_init(struct phy_device *phydev)
}
}
+ /* Enable Interrupt output INT_OE in CFG3 register */
+ if (phy_interrupt_is_valid(phydev)) {
+ val = phy_read(phydev, DP83867_CFG3);
+ val |= BIT(7);
+ phy_write(phydev, DP83867_CFG3, val);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 22b51f01a94a..567280a72241 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -28,7 +28,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
MODULE_DESCRIPTION("ICPlus IP175C/IP101A/IP101G/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index b9fde1bcf0f0..8d198a1f0031 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* The Level one LXT970 is used by many boards */
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e269262471a4..0b78210c0fa7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE) {
+ if (phydev->supported & SUPPORTED_FIBRE &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
if (err < 0)
goto error;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 25f93a98863b..48da6e93c3f7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1065,6 +1065,15 @@ void phy_state_machine(struct work_struct *work)
if (old_link != phydev->link)
phydev->state = PHY_CHANGELINK;
}
+ /*
+ * Failsafe: check that nobody set phydev->link=0 between two
+ * poll cycles, otherwise we won't leave RUNNING state as long
+ * as link remains down.
+ */
+ if (!phydev->link && phydev->state == PHY_RUNNING) {
+ phydev->state = PHY_CHANGELINK;
+ phydev_err(phydev, "no link in PHY_RUNNING\n");
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9c06f8028f0c..92b08383cafa 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1187,8 +1187,8 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
static int genphy_config_eee_advert(struct phy_device *phydev)
{
- u32 broken = phydev->eee_broken_modes;
- u32 old_adv, adv;
+ int broken = phydev->eee_broken_modes;
+ int old_adv, adv;
/* Nothing to disable */
if (!broken)
@@ -1665,7 +1665,7 @@ static void of_set_phy_supported(struct phy_device *phydev)
static void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- u32 broken;
+ u32 broken = 0;
if (!IS_ENABLED(CONFIG_OF_MDIO))
return;
@@ -1673,8 +1673,20 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
if (!node)
return;
- if (!of_property_read_u32(node, "eee-broken-modes", &broken))
- phydev->eee_broken_modes = broken;
+ if (of_property_read_bool(node, "eee-broken-100tx"))
+ broken |= MDIO_EEE_100TX;
+ if (of_property_read_bool(node, "eee-broken-1000t"))
+ broken |= MDIO_EEE_1000T;
+ if (of_property_read_bool(node, "eee-broken-10gt"))
+ broken |= MDIO_EEE_10GT;
+ if (of_property_read_bool(node, "eee-broken-1000kx"))
+ broken |= MDIO_EEE_1000KX;
+ if (of_property_read_bool(node, "eee-broken-10gkx4"))
+ broken |= MDIO_EEE_10GKX4;
+ if (of_property_read_bool(node, "eee-broken-10gkr"))
+ broken |= MDIO_EEE_10GKR;
+
+ phydev->eee_broken_modes = broken;
}
/**
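
The eee-broken-* handling above is an instance of mapping a set of boolean devicetree properties onto a bitmask: each of_property_read_bool() test ORs a fixed flag into an accumulator, which keeps the binding self-describing instead of exposing raw register values. A condensed sketch of the idea (hypothetical property names and flag values):

/* Sketch: fold boolean DT properties into a feature mask. */
#define EX_QUIRK_A	BIT(0)
#define EX_QUIRK_B	BIT(1)

static u32 ex_read_quirks(struct device_node *node)
{
	u32 quirks = 0;

	if (of_property_read_bool(node, "vendor,quirk-a"))
		quirks |= EX_QUIRK_A;
	if (of_property_read_bool(node, "vendor,quirk-b"))
		quirks |= EX_QUIRK_B;

	return quirks;
}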
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index d470db89e8dd..dbef8002bc28 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 9c889e0303dd..feb9569e3345 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -34,7 +34,7 @@
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/string.h>
#define PPP_VERSION "2.4.2"
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 925d3e295bac..9ae53986cb4a 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -47,7 +47,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PPP_VERSION "2.4.2"
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f017c72bb7fd..d7e405268983 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -83,7 +83,7 @@
#include <net/netns/generic.h>
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index b9c8be6283d3..c0599b3b23c0 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -34,7 +34,7 @@
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 8b8b53259783..7820fced33f6 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -55,7 +55,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
#include <asm/io.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef SB1000_DEBUG
static int sb1000_debug = SB1000_DEBUG;
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed25252aac..5782733959f0 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -75,7 +75,7 @@
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/timer.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 7e933d8ff811..9841f3dc0682 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -64,7 +64,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/string.h>
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a569e61bc1d9..cd8e02c94be0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -73,7 +73,7 @@
#include <linux/uio.h>
#include <linux/skb_array.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
@@ -1156,7 +1156,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
bool zerocopy = false;
int err;
u32 rxhash;
- ssize_t n;
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -1166,8 +1165,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
len -= sizeof(pi);
- n = copy_from_iter(&pi, sizeof(pi), from);
- if (n != sizeof(pi))
+ if (!copy_from_iter_full(&pi, sizeof(pi), from))
return -EFAULT;
}
@@ -1176,8 +1174,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
len -= tun->vnet_hdr_sz;
- n = copy_from_iter(&gso, sizeof(gso), from);
- if (n != sizeof(gso))
+ if (!copy_from_iter_full(&gso, sizeof(gso), from))
return -EFAULT;
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6c646e228833..6e98ede997d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
.probe = usbnet_probe,
.suspend = asix_suspend,
.resume = asix_resume,
+ .reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index a1f2f6f1e614..3daa41bdd4ea 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -42,7 +42,7 @@
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#undef DEBUG
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2d1a6f2e16ab..f317984f7536 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1282,7 +1282,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
/* start timer, if not already started */
if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
hrtimer_start(&ctx->tx_timer,
- ktime_set(0, ctx->timer_interval),
+ ctx->timer_interval,
HRTIMER_MODE_REL);
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 338aed5da14d..876f02f4945e 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -54,7 +54,7 @@
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/firmware.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#undef DEBUG
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 399f7ee57aea..24e803fe9a53 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -42,7 +42,7 @@
#include <linux/usb.h>
#include <linux/module.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "pegasus.h"
/*
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7dc61228c55b..f3b48ad90865 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
+ if (!(tp->netdev->features & NETIF_F_RXCSUM))
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -3576,39 +3576,87 @@ static bool delay_autosuspend(struct r8152 *tp)
return false;
}
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
{
- struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev = tp->netdev;
int ret = 0;
- mutex_lock(&tp->control);
+ if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+ u32 rcr = 0;
- if (PMSG_IS_AUTO(message)) {
- if (netif_running(netdev) && delay_autosuspend(tp)) {
+ if (delay_autosuspend(tp)) {
ret = -EBUSY;
goto out1;
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
- } else {
- netif_device_detach(netdev);
+ if (netif_carrier_ok(netdev)) {
+ u32 ocp_data;
+
+ rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data = rcr & ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+ rxdy_gated_en(tp, true);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+ PLA_OOB_CTRL);
+ if (!(ocp_data & RXFIFO_EMPTY)) {
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ ret = -EBUSY;
+ goto out1;
+ }
+ }
+
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+
+ tp->rtl_ops.autosuspend_en(tp, true);
+
+ if (netif_carrier_ok(netdev)) {
+ napi_disable(&tp->napi);
+ rtl_stop_rx(tp);
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ napi_enable(&tp->napi);
+ }
}
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+
+out1:
+ return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ int ret = 0;
+
+ netif_device_detach(netdev);
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
napi_disable(&tp->napi);
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_stop_rx(tp);
- tp->rtl_ops.autosuspend_en(tp, true);
- } else {
- cancel_delayed_work_sync(&tp->schedule);
- tp->rtl_ops.down(tp);
- }
+ cancel_delayed_work_sync(&tp->schedule);
+ tp->rtl_ops.down(tp);
napi_enable(&tp->napi);
}
-out1:
+
+ return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ int ret;
+
+ mutex_lock(&tp->control);
+
+ if (PMSG_IS_AUTO(message))
+ ret = rtl8152_runtime_suspend(tp);
+ else
+ ret = rtl8152_system_suspend(tp);
+
mutex_unlock(&tp->control);
return ret;
@@ -4308,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ if (tp->version == RTL_VER_01) {
+ netdev->features &= ~NETIF_F_RXCSUM;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
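
Splitting the suspend handler into runtime and system paths leaves the top-level callback as a thin dispatcher on PMSG_IS_AUTO(), which is the usual way USB drivers distinguish autosuspend from a full system sleep. A minimal sketch of that shape (hypothetical private struct and helpers):

/* Sketch: dispatch autosuspend vs. system suspend under the device lock. */
static int ex_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct ex_priv *priv = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&priv->lock);
	if (PMSG_IS_AUTO(message))
		ret = ex_runtime_suspend(priv);	/* hypothetical helper */
	else
		ret = ex_system_suspend(priv);	/* hypothetical helper */
	mutex_unlock(&priv->lock);

	return ret;
}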
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 93a1bda1c1e5..95b7bd0d7abc 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -14,7 +14,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Version Information */
#define DRIVER_VERSION "v0.6.2 (2004/08/27)"
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b425fa1013af..4a105006ca63 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
+#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
@@ -81,6 +82,8 @@ struct receive_queue {
struct napi_struct napi;
+ struct bpf_prog __rcu *xdp_prog;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -111,6 +114,9 @@ struct virtnet_info {
/* # of queue pairs currently used by the driver */
u16 curr_queue_pairs;
+ /* # of XDP queue pairs currently used by the driver */
+ u16 xdp_queue_pairs;
+
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -324,14 +330,148 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
+static void virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct send_queue *sq,
+ struct xdp_buff *xdp,
+ void *data)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ unsigned int num_sg, len;
+ void *xdp_sent;
+ int err;
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (vi->mergeable_rx_bufs) {
+ struct page *sent_page = virt_to_head_page(xdp_sent);
+
+ put_page(sent_page);
+ } else { /* small buffer */
+ struct sk_buff *skb = xdp_sent;
+
+ kfree_skb(skb);
+ }
+ }
+
+ if (vi->mergeable_rx_bufs) {
+ /* Zero header and leave csum up to XDP layers */
+ hdr = xdp->data;
+ memset(hdr, 0, vi->hdr_len);
+
+ num_sg = 1;
+ sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+ } else { /* small buffer */
+ struct sk_buff *skb = data;
+
+ /* Zero header and leave csum up to XDP layers */
+ hdr = skb_vnet_hdr(skb);
+ memset(hdr, 0, vi->hdr_len);
+
+ num_sg = 2;
+ sg_init_table(sq->sg, 2);
+ sg_set_buf(sq->sg, hdr, vi->hdr_len);
+ skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ }
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ data, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (vi->mergeable_rx_bufs) {
+ struct page *page = virt_to_head_page(xdp->data);
+
+ put_page(page);
+ } else /* small buffer */
+ kfree_skb(data);
+ return; /* On error abort to avoid unnecessary kick */
+ }
+
+ virtqueue_kick(sq->vq);
+}
+
+static u32 do_xdp_prog(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct bpf_prog *xdp_prog,
+ void *data, int len)
+{
+ int hdr_padded_len;
+ struct xdp_buff xdp;
+ void *buf;
+ unsigned int qp;
+ u32 act;
+
+ if (vi->mergeable_rx_bufs) {
+ hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ xdp.data = data + hdr_padded_len;
+ xdp.data_end = xdp.data + (len - vi->hdr_len);
+ buf = data;
+ } else { /* small buffers */
+ struct sk_buff *skb = data;
+
+ xdp.data = skb->data;
+ xdp.data_end = xdp.data + len;
+ buf = skb->data;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return XDP_PASS;
+ case XDP_TX:
+ qp = vi->curr_queue_pairs -
+ vi->xdp_queue_pairs +
+ smp_processor_id();
+ xdp.data = buf;
+ virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
+ return XDP_TX;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ return XDP_DROP;
+ }
+}
+
+static struct sk_buff *receive_small(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct receive_queue *rq,
+ void *buf, unsigned int len)
{
struct sk_buff * skb = buf;
+ struct bpf_prog *xdp_prog;
len -= vi->hdr_len;
skb_trim(skb, len);
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ u32 act;
+
+ if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ goto err_xdp;
+ act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
return skb;
+
+err_xdp:
+ rcu_read_unlock();
+ dev->stats.rx_dropped++;
+ kfree_skb(skb);
+xdp_xmit:
+ return NULL;
}
static struct sk_buff *receive_big(struct net_device *dev,
@@ -354,6 +494,67 @@ err:
return NULL;
}
+/* The conditions to enable XDP should preclude the underlying device from
+ * sending packets across multiple buffers (num_buf > 1). However, per spec,
+ * doing so does not appear to be illegal, merely against convention. So, to
+ * avoid making the system unresponsive, the packet is linearized into a page
+ * and the XDP program is run on it. This is extremely slow, and we push a
+ * warning to the user so it gets fixed as soon as possible. Fixing it may
+ * require inspecting the underlying hardware to determine why multiple
+ * buffers are being received, or simply loading the XDP program in the
+ * ingress stack after the skb is built, since there is no advantage to
+ * running it here anymore.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+ u16 *num_buf,
+ struct page *p,
+ int offset,
+ unsigned int *len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+ unsigned int page_off = 0;
+
+ if (!page)
+ return NULL;
+
+ memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ page_off += *len;
+
+ while (--*num_buf) {
+ unsigned int buflen;
+ unsigned long ctx;
+ void *buf;
+ int off;
+
+ ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!ctx))
+ goto err_buf;
+
+ buf = mergeable_ctx_to_buf_address(ctx);
+ p = virt_to_head_page(buf);
+ off = buf - page_address(p);
+
+ /* guard against a misconfigured or uncooperative backend that
+ * is sending packets larger than the MTU.
+ */
+ if ((page_off + buflen) > PAGE_SIZE) {
+ put_page(p);
+ goto err_buf;
+ }
+
+ memcpy(page_address(page) + page_off,
+ page_address(p) + off, buflen);
+ page_off += buflen;
+ put_page(p);
+ }
+
+ *len = page_off;
+ return page;
+err_buf:
+ __free_pages(page, 0);
+ return NULL;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -365,11 +566,71 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
- unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ struct sk_buff *head_skb, *curr_skb;
+ struct bpf_prog *xdp_prog;
+ unsigned int truesize;
+
+ head_skb = NULL;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (xdp_prog) {
+ struct page *xdp_page;
+ u32 act;
+
+ /* This happens when rx buffer size is underestimated */
+ if (unlikely(num_buf > 1)) {
+ /* linearize data for XDP */
+ xdp_page = xdp_linearize_page(rq, &num_buf,
+ page, offset, &len);
+ if (!xdp_page)
+ goto err_xdp;
+ offset = 0;
+ } else {
+ xdp_page = page;
+ }
- struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
- truesize);
- struct sk_buff *curr_skb = head_skb;
+ /* Transient failure which in theory could occur if
+ * in-flight packets from before XDP was enabled reach
+ * the receive path after XDP is loaded. In practice I
+ * was not able to create this condition.
+ */
+ if (unlikely(hdr->hdr.gso_type))
+ goto err_xdp;
+
+ act = do_xdp_prog(vi, rq, xdp_prog,
+ page_address(xdp_page) + offset, len);
+ switch (act) {
+ case XDP_PASS:
+ /* We can only create skb based on xdp_page. */
+ if (unlikely(xdp_page != page)) {
+ rcu_read_unlock();
+ put_page(page);
+ head_skb = page_to_skb(vi, rq, xdp_page,
+ 0, len, PAGE_SIZE);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ return head_skb;
+ }
+ break;
+ case XDP_TX:
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ if (unlikely(xdp_page != page))
+ goto err_xdp;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_DROP:
+ default:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
+ truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
@@ -423,6 +684,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
+err_xdp:
+ rcu_read_unlock();
err_skb:
put_page(page);
while (--num_buf) {
@@ -439,6 +702,7 @@ err_skb:
err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(head_skb);
+xdp_xmit:
return NULL;
}
@@ -470,7 +734,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
else if (vi->big_packets)
skb = receive_big(dev, vi, rq, buf, len);
else
- skb = receive_small(vi, buf, len);
+ skb = receive_small(dev, vi, rq, buf, len);
if (unlikely(!skb))
return;
@@ -1337,6 +1601,13 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
return -EINVAL;
+ /* For now we don't support modifying channels while XDP is loaded.
+ * Also, when XDP is loaded all RX queues have XDP programs, so we
+ * only need to check a single RX queue.
+ */
+ if (vi->rq[0].xdp_prog)
+ return -EINVAL;
+
get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
@@ -1428,6 +1699,95 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_settings = virtnet_set_settings,
};
+static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+{
+ unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ u16 xdp_qp = 0, curr_qp;
+ int i, err;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
+ netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
+ netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
+ return -EINVAL;
+ }
+
+ if (dev->mtu > max_sz) {
+ netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
+ return -EINVAL;
+ }
+
+ curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
+ if (prog)
+ xdp_qp = nr_cpu_ids;
+
+ /* XDP requires extra queues for XDP_TX */
+ if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ netdev_warn(dev, "request %i queues but max is %i\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
+ return -ENOMEM;
+ }
+
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err) {
+ dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
+ return err;
+ }
+
+ if (prog) {
+ prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ if (IS_ERR(prog)) {
+ virtnet_set_queues(vi, curr_qp);
+ return PTR_ERR(prog);
+ }
+ }
+
+ vi->xdp_queue_pairs = xdp_qp;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+ return 0;
+}
+
+static bool virtnet_xdp_query(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (vi->rq[i].xdp_prog)
+ return true;
+ }
+ return false;
+}
+
+static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return virtnet_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = virtnet_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
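
The setup path above only accepts a program if one extra TX queue per possible CPU can be reserved for XDP_TX on top of the currently configured pairs; the bpf_prog_add() of max_queue_pairs - 1 likewise reflects that the caller already holds one reference while every RX queue needs its own. A small standalone sketch of the queue accounting check follows; the function and the numbers in main() are made-up examples, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Models the check in virtnet_xdp_set: loading a program needs
 * curr_qp + nr_cpu_ids queue pairs, bounded by max_queue_pairs. */
static bool xdp_queues_fit(unsigned curr_qp, unsigned nr_cpu_ids,
			   unsigned max_queue_pairs)
{
	return curr_qp + nr_cpu_ids <= max_queue_pairs;
}

int main(void)
{
	/* 4 active pairs, 8 CPUs, device exposes 16 pairs -> accepted */
	printf("%s\n", xdp_queues_fit(4, 8, 16) ? "fits" : "rejected (-ENOMEM)");
	/* 4 active pairs, 16 CPUs, device exposes 16 pairs -> rejected */
	printf("%s\n", xdp_queues_fit(4, 16, 16) ? "fits" : "rejected (-ENOMEM)");
	return 0;
}
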
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -1444,6 +1804,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_xdp = virtnet_xdp,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -1505,12 +1866,20 @@ static void virtnet_free_queues(struct virtnet_info *vi)
static void free_receive_bufs(struct virtnet_info *vi)
{
+ struct bpf_prog *old_prog;
int i;
+ rtnl_lock();
for (i = 0; i < vi->max_queue_pairs; i++) {
while (vi->rq[i].pages)
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
+
+ old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
+ if (old_prog)
+ bpf_prog_put(old_prog);
}
+ rtnl_unlock();
}
static void free_receive_page_frags(struct virtnet_info *vi)
@@ -1521,6 +1890,16 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static bool is_xdp_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
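
is_xdp_queue() above relies on the XDP TX queues occupying the tail of the active queue-pair range. A tiny self-contained version of that index test, with example numbers that are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Queue index layout assumed here:
 *   [0, curr - xdp)      regular queues
 *   [curr - xdp, curr)   XDP TX queues
 *   [curr, ...)          inactive queues */
static bool is_xdp_queue(int q, int curr_queue_pairs, int xdp_queue_pairs)
{
	return q >= curr_queue_pairs - xdp_queue_pairs && q < curr_queue_pairs;
}

int main(void)
{
	int curr = 12, xdp = 8;	/* 4 regular pairs plus 8 XDP TX queues */
	int q;

	for (q = 0; q < 14; q++)
		printf("q=%2d -> %s\n", q,
		       is_xdp_queue(q, curr, xdp) ? "xdp" : "regular/inactive");
	return 0;
}
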
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -1528,8 +1907,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- dev_kfree_skb(buf);
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+ if (!is_xdp_queue(vi, i))
+ dev_kfree_skb(buf);
+ else
+ put_page(virt_to_head_page(buf));
+ }
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1930,7 +2313,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- virtnet_set_affinity(vi);
+ rtnl_lock();
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
+ rtnl_unlock();
/* Assume link up if device can't report link status,
otherwise get link status from config. */
@@ -2099,13 +2484,13 @@ static __init int virtio_net_driver_init(void)
{
int ret;
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
virtnet_cpu_online,
virtnet_cpu_down_prep);
if (ret < 0)
goto out;
virtionet_online = ret;
- ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
+ ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7dc37a090549..59e077be8829 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -119,9 +119,8 @@ enum {
};
/*
- * PCI vendor and device IDs.
+ * Maximum devices supported.
*/
-#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
#define MAX_ETHERNET_CARDS 10
#define MAX_PCI_PASSTHRU_DEVICE 6
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3bca24651dc0..454f907d419a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
};
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
@@ -366,6 +368,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct in6_addr *nexthop;
int ret;
+ nf_reset(skb);
+
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -547,6 +551,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
u32 nexthop;
int ret = -EINVAL;
+ nf_reset(skb);
+
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
@@ -849,8 +855,6 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
{
struct net *net = dev_net(dev);
- nf_reset(skb);
-
if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
skb = NULL; /* kfree_skb(skb) handled by nf code */
@@ -965,6 +969,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
*/
need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
if (!ipv6_ndisc_frame(skb) && !need_strict) {
+ vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1009,6 +1014,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
+ vrf_rx_stats(vrf_dev, skb->len);
+
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);
@@ -1245,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+ if (vrf->tb_id == RT_TABLE_UNSPEC)
+ return -EINVAL;
dev->priv_flags |= IFF_L3MDEV_MASTER;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5723b5..ca7196c40060 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
- __be32 daddr, __be32 *saddr,
+ __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
+ fl4.fl4_dport = dport;
+ fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
+ __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = dport;
+ fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
@@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
- &info->key.u.ipv4.src, NULL, info);
+ &info->key.u.ipv4.src, dport, sport, NULL, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, NULL, info);
+ &info->key.u.ipv6.src, dport, sport, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index ae6ecf401189..65ee2a6f248c 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -52,7 +52,7 @@
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7351e5440ed7..799830ffcae2 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -95,7 +95,7 @@
#include <asm/cache.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 03696d35ee9c..33265eb50420 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -30,7 +30,7 @@
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "farsync.h"
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index dc334c85d966..166696d2c496 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -39,7 +39,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "hd64570.h"
#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index e92ecf1d3314..7ef49dab6855 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -39,7 +39,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "hd64572.h"
#define NAPI_WEIGHT 16
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 6676607164d6..9df9ed62beff 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -35,7 +35,7 @@
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 001b7796740d..4698450c77d1 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -59,7 +59,7 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION 1
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 5920c996fcdf..cffe23bd16e1 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -19,7 +19,7 @@
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "lmc.h"
#include "lmc_var.h"
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
lmc_media_t lmc_ds3_media = {
- lmc_ds3_init, /* special media init stuff */
- lmc_ds3_default, /* reset to default state */
- lmc_ds3_set_status, /* reset status to state provided */
- lmc_dummy_set_1, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_ds3_set_100ft, /* set cable length */
- lmc_ds3_set_scram, /* set scrambler */
- lmc_ds3_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_ds3_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ds3_watchdog
+ .init = lmc_ds3_init, /* special media init stuff */
+ .defaults = lmc_ds3_default, /* reset to default state */
+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ds3_watchdog
};
lmc_media_t lmc_hssi_media = {
- lmc_hssi_init, /* special media init stuff */
- lmc_hssi_default, /* reset to default state */
- lmc_hssi_set_status, /* reset status to state provided */
- lmc_hssi_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_hssi_get_link_status, /* get link status */
- lmc_hssi_set_link_status, /* set link status */
- lmc_hssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_hssi_watchdog
+ .init = lmc_hssi_init, /* special media init stuff */
+ .defaults = lmc_hssi_default, /* reset to default state */
+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_hssi_watchdog
};
-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
- lmc_ssi_default, /* reset to default state */
- lmc_ssi_set_status, /* reset status to state provided */
- lmc_ssi_set_clock, /* set clock source */
- lmc_ssi_set_speed, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_ssi_get_link_status, /* get link status */
- lmc_ssi_set_link_status, /* set link status */
- lmc_ssi_set_crc_length, /* set CRC length */
- lmc_dummy_set_1, /* set T1 or E1 circuit type */
- lmc_ssi_watchdog
+lmc_media_t lmc_ssi_media = {
+ .init = lmc_ssi_init, /* special media init stuff */
+ .defaults = lmc_ssi_default, /* reset to default state */
+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
+ .set_speed = lmc_ssi_set_speed, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
+ .watchdog = lmc_ssi_watchdog
};
lmc_media_t lmc_t1_media = {
- lmc_t1_init, /* special media init stuff */
- lmc_t1_default, /* reset to default state */
- lmc_t1_set_status, /* reset status to state provided */
- lmc_t1_set_clock, /* set clock source */
- lmc_dummy_set2_1, /* set line speed */
- lmc_dummy_set_1, /* set cable length */
- lmc_dummy_set_1, /* set scrambler */
- lmc_t1_get_link_status, /* get link status */
- lmc_dummy_set_1, /* set link status */
- lmc_t1_set_crc_length, /* set CRC length */
- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- lmc_t1_watchdog
+ .init = lmc_t1_init, /* special media init stuff */
+ .defaults = lmc_t1_default, /* reset to default state */
+ .set_status = lmc_t1_set_status, /* reset status to state provided */
+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
+ .set_speed = lmc_dummy_set2_1, /* set line speed */
+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
+ .get_link_status = lmc_t1_get_link_status, /* get link status */
+ .set_link_status = lmc_dummy_set_1, /* set link status */
+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+ .watchdog = lmc_t1_watchdog
};
static void
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 3f83be98d469..3ca3419c54a0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -63,7 +63,7 @@
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sbni.h"
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 421ac5f85699..236c62538036 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -56,7 +56,7 @@
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index b776a0ab106c..9d9b4e0def2a 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)
ret = slic_ds26522_init_configure(spi);
if (ret == 0)
- pr_info("DS26522 cs%d configurated\n", spi->chip_select);
+ pr_info("DS26522 cs%d configured\n", spi->chip_select);
return ret;
}
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 89f8d5979402..4cdebc7659dd 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -19,6 +19,4 @@ ath-objs := main.o \
ath-$(CONFIG_ATH_DEBUG) += debug.o
ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
-ccflags-y += -D__CHECK_ENDIAN__
-
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e315d336..b541a1c74488 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2091,7 +2091,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c893314a191f..50d6ee6afe26 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -8271,7 +8271,7 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
dma_unmap_single(ar->dev,
ar->wmi.mem_chunks[i].paddr,
ar->wmi.mem_chunks[i].len,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree(ar->wmi.mem_chunks[i].vaddr);
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 486afa98a5b8..4e2f3ac266c3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2713,7 +2713,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
fifo_list = &txq->txq_fifo[txq->txq_tailidx];
if (list_empty(fifo_list)) {
ath_txq_unlock(sc, txq);
- return;
+ break;
}
bf = list_first_entry(fifo_list, struct ath_buf, list);
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 11b544b26c74..89bf2f9eca1d 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -22,5 +22,3 @@ wil6210-y += p2p.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
-
-subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ac9ba04afed..c1b4bb03e997 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -338,7 +338,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
- wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index eb92d5ab7a27..e12f62356fd1 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -48,7 +48,7 @@
#include <linux/timer.h>
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index d1568bed1ad1..0383ba559edc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -19,8 +19,6 @@ ccflags-y += \
-Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
-Idrivers/net/wireless/broadcom/brcm80211/include
-ccflags-y += -D__CHECK_ENDIAN__
-
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
cfg80211.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ccae3bbe7db2..7ffc4aba5bab 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6868,7 +6868,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) {
- brcmf_err("P2P initilisation failed (%d)\n", err);
+ brcmf_err("P2P initialisation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_btcoex_attach(cfg);
@@ -6893,7 +6893,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = brcmf_fweh_activate_events(ifp);
if (err) {
brcmf_err("FWEH activation failed (%d)\n", err);
- goto wiphy_unreg_out;
+ goto detach;
}
/* Fill in some of the advertised nl80211 supported features */
@@ -6908,6 +6908,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return cfg;
+detach:
+ brcmf_btcoex_detach(cfg);
+ brcmf_p2p_detach(&cfg->p2p);
wiphy_unreg_out:
wiphy_unregister(cfg->wiphy);
priv_out:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index f273cab0da10..9a25e79a46cf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -137,6 +137,7 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
pfn.wsec = cpu_to_le32(0);
pfn.infra = cpu_to_le32(1);
+ pfn.flags = 0;
if (active)
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 960e6b86bbcb..ed83f33aceb7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,7 +16,6 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -D__CHECK_ENDIAN__ \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
-Idrivers/net/wireless/broadcom/brcm80211/include
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 64176090b196..356aba9d3d53 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -148,7 +148,7 @@ that only one external action is invoked at a time.
#include <linux/dma-mapping.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_geo.c b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c
index 218f2a32de21..ce7eda20a68f 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_geo.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c
@@ -38,7 +38,7 @@
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "libipw.h"
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
index 2332075565f2..c58c5b2dcce5 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -46,7 +46,7 @@
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/net_namespace.h>
#include <net/arp.h>
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 1c1ec7bb9302..6df19f03355a 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -29,7 +29,7 @@
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <net/lib80211.h>
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index e8c039879b05..048f1e3ada11 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -39,7 +39,7 @@
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "libipw.h"
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
index c985a01a0731..c826a6b985bb 100644
--- a/drivers/net/wireless/intel/iwlegacy/Makefile
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -13,5 +13,3 @@ iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 6e7ed908de0c..92e611841200 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -15,7 +15,7 @@ iwlwifi-objs += $(iwlwifi-m)
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
+ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 4d19685f31c3..b256a354953a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -10,4 +10,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index e9cef9de9ed8..c96f9b1d948a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
- min(abs(delta_g),
- (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+ min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 1ad0ec180d5d..84813b550ef1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -228,7 +228,7 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
-typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
@@ -258,7 +258,7 @@ enum iwl_ucode_tlv_api {
#endif
};
-typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 2e06dfc1c477..83ac807e547d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -9,4 +9,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o fw-dbg.o
iwlmvm-$(CONFIG_PM) += d3.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index a8a9bd8e176a..544ef7adde7d 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -32,7 +32,7 @@
#include <asm/delay.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 1a16b8cb366e..544fc09dcb62 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -27,7 +27,7 @@
#include <net/net_namespace.h>
#include <net/iw_handler.h>
#include <net/lib80211.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "hostap_wlan.h"
#include "hostap_80211.h"
diff --git a/drivers/net/wireless/intersil/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile
index bfdefb85abcd..b7ecef820f76 100644
--- a/drivers/net/wireless/intersil/orinoco/Makefile
+++ b/drivers/net/wireless/intersil/orinoco/Makefile
@@ -12,6 +12,3 @@ obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
-
-# Orinoco should be endian clean.
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index bc7397d709d3..08bc7822f820 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -16,7 +16,7 @@
/********************************************************************/
int orinoco_mic_init(struct orinoco_private *priv)
{
- priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
return -ENOMEM;
}
- priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
void orinoco_mic_free(struct orinoco_private *priv)
{
if (priv->tx_tfm_mic)
- crypto_free_ahash(priv->tx_tfm_mic);
+ crypto_free_shash(priv->tx_tfm_mic);
if (priv->rx_tfm_mic)
- crypto_free_ahash(priv->rx_tfm_mic);
+ crypto_free_shash(priv->rx_tfm_mic);
}
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic)
{
- AHASH_REQUEST_ON_STACK(req, tfm_michael);
- struct scatterlist sg[2];
+ SHASH_DESC_ON_STACK(desc, tfm_michael);
u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
int err;
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
hdr[ETH_ALEN * 2 + 2] = 0;
hdr[ETH_ALEN * 2 + 3] = 0;
- /* Use scatter gather to MIC header and data in one go */
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, sizeof(hdr));
- sg_set_buf(&sg[1], data, data_len);
+ desc->tfm = tfm_michael;
+ desc->flags = 0;
- if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
- return -1;
+ err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
+ if (err)
+ return err;
+
+ err = crypto_shash_init(desc);
+ if (err)
+ return err;
+
+ err = crypto_shash_update(desc, hdr, sizeof(hdr));
+ if (err)
+ return err;
+
+ err = crypto_shash_update(desc, data, data_len);
+ if (err)
+ return err;
+
+ err = crypto_shash_final(desc, mic);
+ shash_desc_zero(desc);
- ahash_request_set_tfm(req, tfm_michael);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
return err;
}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
index ce731d05cc98..e8724e889219 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ b/drivers/net/wireless/intersil/orinoco/mic.h
@@ -6,6 +6,7 @@
#define _ORINOCO_MIC_H_
#include <linux/types.h>
+#include <crypto/hash.h>
#define MICHAEL_MIC_LEN 8
@@ -15,7 +16,7 @@ struct crypto_ahash;
int orinoco_mic_init(struct orinoco_private *priv);
void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 2f0c84b1c440..5fa1c3e3713f 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -152,8 +152,8 @@ struct orinoco_private {
u8 *wpa_ie;
int wpa_ie_len;
- struct crypto_ahash *rx_tfm_mic;
- struct crypto_ahash *tx_tfm_mic;
+ struct crypto_shash *rx_tfm_mic;
+ struct crypto_shash *tx_tfm_mic;
unsigned int wpa_enabled:1;
unsigned int tkip_cm_active:1;
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index 6700387ef9ab..ce9d4db0d9ca 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -21,7 +21,7 @@
#include <linux/delay.h>
#include <linux/ktime.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include "prismcompat.h"
diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
index 48e8a978a832..334717b0a2be 100644
--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
@@ -26,7 +26,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "prismcompat.h"
#include "isl_ioctl.h"
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
index ea9ed8a5db4d..08fc802ead4b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/Makefile
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -1,5 +1,3 @@
-ccflags-y += -D__CHECK_ENDIAN__
-
obj-$(CONFIG_MT7601U) += mt7601u.o
mt7601u-objs = \
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 9f61293f1a56..f38c44061b5b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -177,7 +177,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
if (rt2800usb_txstatus_pending(rt2x00dev)) {
/* Read register after 1 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
- ktime_set(0, TXSTATUS_READ_INTERVAL),
+ TXSTATUS_READ_INTERVAL,
HRTIMER_MODE_REL);
return false;
}
@@ -204,7 +204,7 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
/* Read TX_STA_FIFO register after 2 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
- ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
+ 2 * TXSTATUS_READ_INTERVAL,
HRTIMER_MODE_REL);
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 4fdc7223c894..b94479441b0c 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -53,7 +53,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Warning : these stuff will slow down the driver... */
#define WIRELESS_SPY /* Enable spying addresses */
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index ad6d3c52ec57..84c2e826fa1d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -30,5 +30,3 @@ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
obj-$(CONFIG_RTL8821AE) += rtl8821ae/
obj-$(CONFIG_RTL8192EE) += rtl8192ee/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
index 47ceecfcb7dc..d1454d4f08a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
@@ -3,5 +3,3 @@ btcoexist-objs := halbtc8723b2ant.o \
rtl_btc.o
obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 2caa4ad04dba..ded1493fee9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1829,7 +1829,8 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pskb = __skb_dequeue(&ring->queue);
- dev_kfree_skb_irq(pskb);
+ if (pskb)
+ dev_kfree_skb_irq(pskb);
/*this is wrong, fill_tx_cmddesc needs update*/
pdesc = &ring->desc[0];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
index 676e7de27f27..dae4f0f19cd3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
@@ -11,5 +11,3 @@ rtl8188ee-objs := \
trx.o
obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
index aee42d7ae8a2..0546b7556259 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
@@ -5,5 +5,3 @@ rtl8192c-common-objs := \
phy_common.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
index c0cb0cfe7d37..577c7adbc322 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
@@ -9,5 +9,3 @@ rtl8192ce-objs := \
trx.o
obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
index ad2de6b839ef..97437dadc287 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
@@ -10,5 +10,3 @@ rtl8192cu-objs := \
trx.o
obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
index e3213c8264b6..d0703f20d30c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
@@ -10,5 +10,3 @@ rtl8192de-objs := \
trx.o
obj-$(CONFIG_RTL8192DE) += rtl8192de.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
index 0315eeda9b60..f254b9f64326 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
@@ -12,5 +12,3 @@ rtl8192ee-objs := \
obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
index b7eb13819cbc..dfa9dbbe2cdf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
@@ -11,5 +11,3 @@ rtl8192se-objs := \
obj-$(CONFIG_RTL8192SE) += rtl8192se.o
-ccflags-y += -D__CHECK_ENDIAN__
-
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
index 6220672a96f4..e7607d2cb2ef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
@@ -14,5 +14,3 @@ rtl8723ae-objs := \
obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 1186755e55b8..e5505387260b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wating too long for FW read clear HMEBox(%d)!\n",
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
boxnum);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
index a77c34102792..a841cbd55d8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
@@ -12,5 +12,3 @@ rtl8723be-objs := \
obj-$(CONFIG_RTL8723BE) += rtl8723be.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
index 345a68adcf38..73da75526e2a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
@@ -5,5 +5,3 @@ rtl8723-common-objs := \
phy_common.o
obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
index f7a26f71197e..8ca406b95f02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
@@ -12,5 +12,3 @@ rtl8821ae-objs := \
obj-$(CONFIG_RTL8821AE) += rtl8821ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 0a508649903d..49015b05f3d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
return -ENOMEM;
}
rtlpriv = hw->priv;
+ rtlpriv->hw = hw;
rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
GFP_KERNEL);
if (!rtlpriv->usb_data)
diff --git a/drivers/net/wireless/ti/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index a5c6328b5f72..58b4f935a3f6 100644
--- a/drivers/net/wireless/ti/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
@@ -6,5 +6,3 @@ wl1251_sdio-objs += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index 0a69c1373643..e286713b3c18 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -8,5 +8,3 @@ wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WLCORE) += wlcore.o
obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d9d29ab88184..acec0d9ec422 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -51,7 +51,7 @@
#include <pcmcia/ds.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "wl3501.h"
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 6ccba0d862df..019a158e1128 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -138,11 +138,11 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
base_addr = pci_resource_start(ndev->ntb.pdev, bar);
if (bar != 1) {
- xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 3);
- limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 3);
+ xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
+ limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);
/* Set the limit if supported */
- limit = base_addr + size;
+ limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
@@ -164,14 +164,8 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
xlat_reg = AMD_BAR1XLAT_OFFSET;
limit_reg = AMD_BAR1LMT_OFFSET;
- /* split bar addr range must all be 32 bit */
- if (addr & (~0ull << 32))
- return -EINVAL;
- if ((addr + size) & (~0ull << 32))
- return -EINVAL;
-
/* Set the limit if supported */
- limit = base_addr + size;
+ limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
@@ -199,6 +193,11 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
if (!ndev->peer_sta)
return NTB_LNK_STA_ACTIVE(ndev->cntl_sta);
+ if (ndev->peer_sta & AMD_LINK_UP_EVENT) {
+ ndev->peer_sta = 0;
+ return 1;
+ }
+
/* If peer_sta is reset or D0 event, the ISR has
* started a timer to check link status of hardware.
* So here just clear status bit. And if peer_sta is
@@ -207,7 +206,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev)
*/
if (ndev->peer_sta & AMD_PEER_RESET_EVENT)
ndev->peer_sta &= ~AMD_PEER_RESET_EVENT;
- else if (ndev->peer_sta & AMD_PEER_D0_EVENT)
+ else if (ndev->peer_sta & (AMD_PEER_D0_EVENT | AMD_LINK_DOWN_EVENT))
ndev->peer_sta = 0;
return 0;
@@ -491,6 +490,8 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
break;
case AMD_PEER_D3_EVENT:
case AMD_PEER_PMETO_EVENT:
+ case AMD_LINK_UP_EVENT:
+ case AMD_LINK_DOWN_EVENT:
amd_ack_smu(ndev, status);
/* link down */
@@ -598,7 +599,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
err_msix_request:
while (i-- > 0)
- free_irq(ndev->msix[i].vector, ndev);
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 2eac3cd3e646..13d73ed94a52 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -148,9 +148,12 @@ enum {
AMD_PEER_D3_EVENT = BIT(2),
AMD_PEER_PMETO_EVENT = BIT(3),
AMD_PEER_D0_EVENT = BIT(4),
+ AMD_LINK_UP_EVENT = BIT(5),
+ AMD_LINK_DOWN_EVENT = BIT(6),
AMD_EVENT_INTMASK = (AMD_PEER_FLUSH_EVENT |
AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT |
- AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT),
+ AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT |
+ AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT),
AMD_PMESTAT_OFFSET = 0x480,
AMD_PMSGTRIG_OFFSET = 0x490,
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 7310a261c858..eca9688bf9d9 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -86,7 +86,12 @@ static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
+static const struct intel_ntb_reg skx_reg;
+static const struct intel_ntb_alt_reg skx_pri_reg;
+static const struct intel_ntb_alt_reg skx_b2b_reg;
+static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
+static const struct ntb_dev_ops intel_ntb3_ops;
static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;
@@ -145,6 +150,9 @@ module_param_named(xeon_b2b_dsd_bar5_addr32,
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
"XEON B2B DSD split-BAR 5 32-bit address");
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+static int xeon_init_isr(struct intel_ntb_dev *ndev);
+
#ifndef ioread64
#ifdef readq
#define ioread64 readq
@@ -206,6 +214,14 @@ static inline int pdev_is_xeon(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
+{
+ if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
+ return 1;
+
+ return 0;
+}
+
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
ndev->unsafe_flags = 0;
@@ -390,6 +406,9 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
vec_mask = ndev_vec_mask(ndev, vec);
+ if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
+ vec_mask |= ndev->db_link_mask;
+
dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
ndev->last_ts = jiffies;
@@ -409,6 +428,9 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
struct intel_ntb_vec *nvec = dev;
+ dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n",
+ irq, nvec->num);
+
return ndev_interrupt(nvec->ndev, nvec->num);
}
@@ -465,14 +487,14 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
goto err_msix_request;
}
- dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+ dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
ndev->db_vec_count = msix_count;
ndev->db_vec_shift = msix_shift;
return 0;
err_msix_request:
while (i-- > 0)
- free_irq(ndev->msix[i].vector, ndev);
+ free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
@@ -547,8 +569,171 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
}
}
-static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
+static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev;
+ void __iomem *mmio;
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off;
+ union { u64 v64; u32 v32; u16 v16; } u;
+
+ ndev = filp->private_data;
+ mmio = ndev->self_mmio;
+
+ buf_size = min(count, 0x800ul);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ off = 0;
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB Device Information:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Connection Topology -\t%s\n",
+ ntb_topo_string(ndev->ntb.topo));
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+ if (!ndev->reg->link_is_up(ndev))
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tDown\n");
+ else {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tUp\n");
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Speed -\t\tPCI-E Gen %u\n",
+ NTB_LNK_STA_SPEED(ndev->lnk_sta));
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Width -\t\tx%u\n",
+ NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Memory Window Count -\t%u\n", ndev->mw_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Scratchpad Count -\t%u\n", ndev->spad_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Count -\t%u\n", ndev->db_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Incoming XLAT:\n");
+
+ u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);
+
+ if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Outgoing B2B XLAT:\n");
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1XLMT -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2XLMT -\t\t%#018llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Secondary BAR:\n");
+
+ u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR0 -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR1 -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "EMBAR2 -\t\t%#018llx\n", u.v64);
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Statistics:\n");
+
+ u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "Upstream Memory Miss -\t%u\n", u.v16);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Hardware Errors:\n");
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ SKX_DEVSTS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "DEVSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ SKX_LINK_STATUS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "LNKSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ SKX_UNCERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ SKX_CORERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "CORERRSTS -\t\t%#06x\n", u.v32);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
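/*
 * Illustrative sketch (not part of this patch): the debugfs readers above
 * share one pattern -- build a bounded text buffer with scnprintf() and
 * hand it to simple_read_from_buffer().  The function name and the field
 * printed below are made up for illustration only.
 */
static ssize_t example_debugfs_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *offp)
{
	size_t buf_size = min(count, 0x800ul);
	ssize_t ret, off = 0;
	char *buf;

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* scnprintf() never overruns buf_size and returns the bytes added */
	off += scnprintf(buf + off, buf_size - off,
			 "Example Counter -\t%u\n", 42);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}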
+
+static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
struct pci_dev *pdev;
@@ -813,6 +998,20 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ret;
}
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev = filp->private_data;
+
+ if (pdev_is_xeon(ndev->ntb.pdev) ||
+ pdev_is_atom(ndev->ntb.pdev))
+ return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
+ else if (pdev_is_skx_xeon(ndev->ntb.pdev))
+ return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+
+ return -ENXIO;
+}
+
static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
if (!debugfs_dir) {
@@ -1428,6 +1627,383 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)
atom_deinit_isr(ndev);
}
+/* Skylake Xeon NTB */
+
+static u64 skx_db_ioread(void __iomem *mmio)
+{
+ return ioread64(mmio);
+}
+
+static void skx_db_iowrite(u64 bits, void __iomem *mmio)
+{
+ iowrite64(bits, mmio);
+}
+
+static int skx_init_isr(struct intel_ntb_dev *ndev)
+{
+ int i;
+
+ /*
+ * The MSIX vectors and the interrupt status bits are not lined up
+ * on Skylake. By default the link status bit is bit 32, but it is
+ * routed to MSIX vector 0. We need a fixup to line them up.
+ * The vectors at reset are 1-32,0; reprogram them to 0-32.
+ */
+
+ for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
+ iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
+
+ /* Move the link status interrupt down one vector as a workaround */
+ if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
+ iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
+ ndev->self_mmio + SKX_INTVEC_OFFSET +
+ (SKX_DB_MSIX_VECTOR_COUNT - 1));
+ }
+
+ return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
+ SKX_DB_MSIX_VECTOR_COUNT,
+ SKX_DB_MSIX_VECTOR_SHIFT,
+ SKX_DB_TOTAL_SHIFT);
+}
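/*
 * Illustrative note (not part of this patch): after the loop above the
 * interrupt-vector table reads 0,1,...,32, i.e. doorbell/status bit N
 * fires MSIX vector N.  With the vector32 workaround the last entry is
 * reprogrammed to 31, so the link-status bit (bit 32) shares vector 31
 * with doorbell 31 -- which is why ndev_interrupt() ORs db_link_mask
 * into vec_mask for vec == 31 when NTB_HWERR_MSIX_VECTOR32_BAD is set.
 */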
+
+static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
+ const struct intel_b2b_addr *addr,
+ const struct intel_b2b_addr *peer_addr)
+{
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ resource_size_t bar_size;
+ phys_addr_t bar_addr;
+ int b2b_bar;
+ u8 bar_sz;
+
+ pdev = ndev_pdev(ndev);
+ mmio = ndev->self_mmio;
+
+ if (ndev->b2b_idx == UINT_MAX) {
+ dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+ b2b_bar = 0;
+ ndev->b2b_off = 0;
+ } else {
+ b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+ if (b2b_bar < 0)
+ return -EIO;
+
+ dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+ dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+
+ if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using first half of bar\n");
+ ndev->b2b_off = bar_size >> 1;
+ } else if (bar_size >= XEON_B2B_MIN_SIZE) {
+ dev_dbg(ndev_dev(ndev),
+ "b2b using whole bar\n");
+ ndev->b2b_off = 0;
+ --ndev->mw_count;
+ } else {
+ dev_dbg(ndev_dev(ndev),
+ "b2b bar size is too small\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * Reset the secondary bar sizes to match the primary bar sizes,
+ * except disable or halve the size of the b2b secondary bar.
+ */
+ pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
+ if (b2b_bar == 1) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+
+ pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);
+
+ pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
+ if (b2b_bar == 2) {
+ if (ndev->b2b_off)
+ bar_sz -= 1;
+ else
+ bar_sz = 0;
+ }
+
+ pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
+ pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
+ dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);
+
+ /* SBAR01 hit by first part of the b2b bar */
+ if (b2b_bar == 0)
+ bar_addr = addr->bar0_addr;
+ else if (b2b_bar == 1)
+ bar_addr = addr->bar2_addr64;
+ else if (b2b_bar == 2)
+ bar_addr = addr->bar4_addr64;
+ else
+ return -EIO;
+
+ /* setup incoming bar limits == base addrs (zero length windows) */
+ bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
+ bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);
+
+ bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
+ bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
+ dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);
+
+ /* zero incoming translation addrs */
+ iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
+ iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
+
+ ndev->peer_mmio = ndev->self_mmio;
+
+ return 0;
+}
+
+static int skx_init_ntb(struct intel_ntb_dev *ndev)
+{
+ int rc;
+
+ ndev->mw_count = XEON_MW_COUNT;
+ ndev->spad_count = SKX_SPAD_COUNT;
+ ndev->db_count = SKX_DB_COUNT;
+ ndev->db_link_mask = SKX_DB_LINK_BIT;
+
+ /* DB fixup: with the vector32 workaround, bit 31 also carries link status */
+ if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
+ ndev->db_link_mask |= BIT_ULL(31);
+
+ switch (ndev->ntb.topo) {
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ ndev->self_reg = &skx_pri_reg;
+ ndev->peer_reg = &skx_b2b_reg;
+ ndev->xlat_reg = &skx_sec_xlat;
+
+ if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+ rc = skx_setup_b2b_mw(ndev,
+ &xeon_b2b_dsd_addr,
+ &xeon_b2b_usd_addr);
+ } else {
+ rc = skx_setup_b2b_mw(ndev,
+ &xeon_b2b_usd_addr,
+ &xeon_b2b_dsd_addr);
+ }
+
+ if (rc)
+ return rc;
+
+ /* Enable Bus Master and Memory Space on the secondary side */
+ iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+ ndev->self_mmio + SKX_SPCICMD_OFFSET);
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ ndev->reg->db_iowrite(ndev->db_valid_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ return 0;
+}
+
+static int skx_init_dev(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev;
+ u8 ppd;
+ int rc;
+
+ pdev = ndev_pdev(ndev);
+
+ ndev->reg = &skx_reg;
+
+ rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+ if (rc)
+ return -EIO;
+
+ ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+ dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+ ntb_topo_string(ndev->ntb.topo));
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ if (pdev_is_skx_xeon(pdev))
+ ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
+
+ rc = skx_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+ return skx_init_isr(ndev);
+}
+
+static int intel_ntb3_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_ctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(ndev_dev(ndev),
+ "Enabling link with max_speed %d max_width %d\n",
+ max_speed, max_width);
+
+ if (max_speed != NTB_SPEED_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+ if (max_width != NTB_WIDTH_AUTO)
+ dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+
+ ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+ ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+ ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ return 0;
+}
+
+static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ unsigned long xlat_reg, limit_reg;
+ resource_size_t bar_size, mw_size;
+ void __iomem *mmio;
+ u64 base, limit, reg_val;
+ int bar;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ /* hardware requires that addr is aligned to bar size */
+ if (addr & (bar_size - 1))
+ return -EINVAL;
+
+ /* make sure the range fits in the usable mw size */
+ if (size > mw_size)
+ return -EINVAL;
+
+ mmio = ndev->self_mmio;
+ xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
+ base = pci_resource_start(ndev->ntb.pdev, bar);
+
+ /* Set the limit, if supported, when size is not the full mw_size */
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = base + mw_size;
+
+ /* set and verify setting the translation address */
+ iowrite64(addr, mmio + xlat_reg);
+ reg_val = ioread64(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
+
+ /* setup the EP */
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
+ base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
+ base &= ~0xf;
+
+ if (limit_reg && size != mw_size)
+ limit = base + size;
+ else
+ limit = base + mw_size;
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
+
+ return 0;
+}
+
+static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ int bit;
+
+ if (db_bits & ~ndev->db_valid_mask)
+ return -EINVAL;
+
+ while (db_bits) {
+ bit = __ffs(db_bits);
+ iowrite32(1, ndev->peer_mmio +
+ ndev->peer_reg->db_bell + (bit * 4));
+ db_bits &= db_bits - 1;
+ }
+
+ return 0;
+}
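/*
 * Illustrative sketch (not part of this patch): the bit-walk used by
 * intel_ntb3_peer_db_set() above.  __ffs() returns the index of the
 * lowest set bit and "bits &= bits - 1" clears it, so each requested
 * doorbell register is written exactly once.  The helper name is made
 * up for illustration.
 */
static void example_ring_doorbells(u64 db_bits, void __iomem *db_base)
{
	while (db_bits) {
		int bit = __ffs(db_bits);	/* lowest set bit */

		/* one 32-bit doorbell register per bit, 4 bytes apart */
		iowrite32(1, db_base + bit * 4);
		db_bits &= db_bits - 1;		/* clear that bit */
	}
}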
+
+static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_read(ndev,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+}
+
+static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ return ndev_db_write(ndev, db_bits,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+}
+
/* XEON */
static u64 xeon_db_ioread(void __iomem *mmio)
@@ -2120,6 +2696,24 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
if (rc)
goto err_init_dev;
+ } else if (pdev_is_skx_xeon(pdev)) {
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
+
+ ndev_init_struct(ndev, pdev);
+ ndev->ntb.ops = &intel_ntb3_ops;
+
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+
+ rc = skx_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
+
} else {
rc = -EINVAL;
goto err_ndev;
@@ -2143,7 +2737,7 @@ err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
- else if (pdev_is_xeon(pdev))
+ else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -2161,7 +2755,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
- else if (pdev_is_xeon(pdev))
+ else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2257,6 +2851,36 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
+static const struct intel_ntb_reg skx_reg = {
+ .poll_link = xeon_poll_link,
+ .link_is_up = xeon_link_is_up,
+ .db_ioread = skx_db_ioread,
+ .db_iowrite = skx_db_iowrite,
+ .db_size = sizeof(u64),
+ .ntb_ctl = SKX_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg skx_pri_reg = {
+ .db_bell = SKX_EM_DOORBELL_OFFSET,
+ .db_clear = SKX_IM_INT_STATUS_OFFSET,
+ .db_mask = SKX_IM_INT_DISABLE_OFFSET,
+ .spad = SKX_IM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg skx_b2b_reg = {
+ .db_bell = SKX_IM_DOORBELL_OFFSET,
+ .db_clear = SKX_EM_INT_STATUS_OFFSET,
+ .db_mask = SKX_EM_INT_DISABLE_OFFSET,
+ .spad = SKX_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg skx_sec_xlat = {
+/* .bar0_base = SKX_EMBAR0_OFFSET, */
+ .bar2_limit = SKX_IMBAR1XLMT_OFFSET,
+ .bar2_xlat = SKX_IMBAR1XBASE_OFFSET,
+};
+
/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
.mw_count = intel_ntb_mw_count,
@@ -2284,6 +2908,31 @@ static const struct ntb_dev_ops intel_ntb_ops = {
.peer_spad_write = intel_ntb_peer_spad_write,
};
+static const struct ntb_dev_ops intel_ntb3_ops = {
+ .mw_count = intel_ntb_mw_count,
+ .mw_get_range = intel_ntb_mw_get_range,
+ .mw_set_trans = intel_ntb3_mw_set_trans,
+ .link_is_up = intel_ntb_link_is_up,
+ .link_enable = intel_ntb3_link_enable,
+ .link_disable = intel_ntb_link_disable,
+ .db_valid_mask = intel_ntb_db_valid_mask,
+ .db_vector_count = intel_ntb_db_vector_count,
+ .db_vector_mask = intel_ntb_db_vector_mask,
+ .db_read = intel_ntb3_db_read,
+ .db_clear = intel_ntb3_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .db_clear_mask = intel_ntb_db_clear_mask,
+ .peer_db_addr = intel_ntb_peer_db_addr,
+ .peer_db_set = intel_ntb3_peer_db_set,
+ .spad_is_unsafe = intel_ntb_spad_is_unsafe,
+ .spad_count = intel_ntb_spad_count,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .peer_spad_addr = intel_ntb_peer_spad_addr,
+ .peer_spad_read = intel_ntb_peer_spad_read,
+ .peer_spad_write = intel_ntb_peer_spad_write,
+};
+
static const struct file_operations intel_ntb_debugfs_info = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -2307,6 +2956,7 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index 3ec149cf6562..f2cf8a783f1e 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
/* Intel Xeon hardware */
@@ -150,6 +151,51 @@
#define XEON_DB_TOTAL_SHIFT 16
#define XEON_SPAD_COUNT 16
+/* Intel Skylake Xeon hardware */
+#define SKX_IMBAR1SZ_OFFSET 0x00d0
+#define SKX_IMBAR2SZ_OFFSET 0x00d1
+#define SKX_EMBAR1SZ_OFFSET 0x00d2
+#define SKX_EMBAR2SZ_OFFSET 0x00d3
+#define SKX_DEVCTRL_OFFSET 0x0098
+#define SKX_DEVSTS_OFFSET 0x009a
+#define SKX_UNCERRSTS_OFFSET 0x014c
+#define SKX_CORERRSTS_OFFSET 0x0158
+#define SKX_LINK_STATUS_OFFSET 0x01a2
+
+#define SKX_NTBCNTL_OFFSET 0x0000
+#define SKX_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */
+#define SKX_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
+#define SKX_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
+#define SKX_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
+#define SKX_IM_INT_STATUS_OFFSET 0x0040
+#define SKX_IM_INT_DISABLE_OFFSET 0x0048
+#define SKX_IM_SPAD_OFFSET 0x0080 /* SPAD */
+#define SKX_USMEMMISS_OFFSET 0x0070
+#define SKX_INTVEC_OFFSET 0x00d0
+#define SKX_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
+#define SKX_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
+#define SKX_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
+#define SKX_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
+#define SKX_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */
+#define SKX_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */
+#define SKX_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */
+#define SKX_EM_INT_STATUS_OFFSET 0x4040
+#define SKX_EM_INT_DISABLE_OFFSET 0x4048
+#define SKX_EM_SPAD_OFFSET 0x4080 /* remote SPAD */
+#define SKX_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */
+#define SKX_SPCICMD_OFFSET 0x4504 /* SPCICMD */
+#define SKX_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */
+#define SKX_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */
+#define SKX_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */
+
+#define SKX_DB_COUNT 32
+#define SKX_DB_LINK 32
+#define SKX_DB_LINK_BIT BIT_ULL(SKX_DB_LINK)
+#define SKX_DB_MSIX_VECTOR_COUNT 33
+#define SKX_DB_MSIX_VECTOR_SHIFT 1
+#define SKX_DB_TOTAL_SHIFT 33
+#define SKX_SPAD_COUNT 16
+
/* Intel Atom hardware */
#define ATOM_SBAR2XLAT_OFFSET 0x0008
@@ -240,6 +286,7 @@
#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0)
#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
+#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
/* flags to indicate unsafe api */
#define NTB_UNSAFE_DB BIT_ULL(0)
@@ -263,6 +310,7 @@ struct intel_ntb_reg {
struct intel_ntb_alt_reg {
unsigned long db_bell;
unsigned long db_mask;
+ unsigned long db_clear;
unsigned long spad;
};
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 4eb8adb34508..f81aa4b18d9f 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -66,6 +66,7 @@
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
+#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
@@ -242,9 +243,6 @@ enum {
NUM_MWS,
MW0_SZ_HIGH,
MW0_SZ_LOW,
- MW1_SZ_HIGH,
- MW1_SZ_LOW,
- MAX_SPAD,
};
#define dev_client_dev(__dev) \
@@ -811,7 +809,7 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
- int i;
+ unsigned int i, count;
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
@@ -831,7 +829,8 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
* goes down, blast them now to give them a sane value the next
* time they are accessed
*/
- for (i = 0; i < MAX_SPAD; i++)
+ count = ntb_spad_count(nt->ndev);
+ for (i = 0; i < count; i++)
ntb_spad_write(nt->ndev, i, 0);
}
@@ -960,7 +959,6 @@ static void ntb_qp_link_work(struct work_struct *work)
ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
- ntb_peer_spad_read(nt->ndev, QP_LINKS);
dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
/* See if the remote side is up */
@@ -1064,17 +1062,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport_ctx *nt;
struct ntb_transport_mw *mw;
- unsigned int mw_count, qp_count;
+ unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
u64 qp_bitmap;
int node;
int rc, i;
mw_count = ntb_mw_count(ndev);
- if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
- dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
- NTB_TRANSPORT_NAME);
- return -EIO;
- }
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
@@ -1090,8 +1083,18 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
return -ENOMEM;
nt->ndev = ndev;
+ spad_count = ntb_spad_count(ndev);
+
+ /* Limit the number of MWs based on the availability of scratchpads */
+
+ if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
+ nt->mw_count = 0;
+ rc = -EINVAL;
+ goto err;
+ }
- nt->mw_count = mw_count;
+ max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
+ nt->mw_count = min(mw_count, max_mw_count_for_spads);
nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
GFP_KERNEL, node);
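/*
 * Worked example (not part of this patch): each advertised memory window
 * consumes a MW<n>_SZ_HIGH/MW<n>_SZ_LOW scratchpad pair on top of the
 * fixed header entries that precede MW0_SZ_HIGH in the enum above.
 * Assuming four such header entries (so MW0_SZ_HIGH == 4), a device with
 * 16 scratchpads can describe (16 - 4) / 2 = 6 windows, and fewer than
 * NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2 = 6) scratchpads cannot
 * describe even one, hence the -EINVAL above.
 */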
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 5371b374f1fe..e8f68f5732f1 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
static int
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index d5dc80c48b4c..b3323c0697f6 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -22,9 +22,8 @@ void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
- dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
- || ndns->claim != dev,
- "%s: invalid claim\n", __func__);
+ lockdep_assert_held(&ndns->dev.mutex);
+ dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
ndns->claim = NULL;
*_ndns = NULL;
put_device(&ndns->dev);
@@ -49,9 +48,8 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
{
if (attach->claim)
return false;
- dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
- || *_ndns,
- "%s: invalid claim\n", __func__);
+ lockdep_assert_held(&attach->dev.mutex);
+ dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
attach->claim = dev;
*_ndns = attach;
get_device(&attach->dev);
@@ -226,6 +224,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *buf, size_t size, int rw)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+ sector_t sector = offset >> 9;
+ int rc = 0;
+
+ if (unlikely(!size))
+ return 0;
if (unlikely(offset + size > nsio->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
@@ -233,17 +237,31 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
if (rw == READ) {
- unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
- if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+ if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
return memcpy_from_pmem(buf, nsio->addr + offset, size);
- } else {
- memcpy_to_pmem(nsio->addr + offset, buf, size);
- nvdimm_flush(to_nd_region(ndns->dev.parent));
}
- return 0;
+ if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+ long cleared;
+
+ cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
+ if (cleared < size)
+ rc = -EIO;
+ if (cleared > 0 && cleared / 512) {
+ cleared /= 512;
+ badblocks_clear(&nsio->bb, sector, cleared);
+ }
+ invalidate_pmem(nsio->addr + offset, size);
+ } else
+ rc = -EIO;
+ }
+
+ memcpy_to_pmem(nsio->addr + offset, buf, size);
+ nvdimm_flush(to_nd_region(ndns->dev.parent));
+
+ return rc;
}
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
@@ -253,7 +271,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nsio->size = resource_size(res);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7ceba08774b6..9303cfeb8bee 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -317,35 +317,6 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf,
}
}
-void __nd_iostat_start(struct bio *bio, unsigned long *start)
-{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
-
- *start = jiffies;
- part_round_stats(cpu, &disk->part0);
- part_stat_inc(cpu, &disk->part0, ios[rw]);
- part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
- part_inc_in_flight(&disk->part0, rw);
- part_stat_unlock();
-}
-EXPORT_SYMBOL(__nd_iostat_start);
-
-void nd_iostat_end(struct bio *bio, unsigned long start)
-{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- unsigned long duration = jiffies - start;
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
-
- part_stat_add(cpu, &disk->part0, ticks[rw], duration);
- part_round_stats(cpu, &disk->part0);
- part_dec_in_flight(&disk->part0, rw);
- part_stat_unlock();
-}
-EXPORT_SYMBOL(nd_iostat_end);
-
static ssize_t commands_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 619834e144d1..ee0b412827bf 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -64,6 +64,8 @@ static int nvdimm_probe(struct device *dev)
nd_label_copy(ndd, to_next_namespace_index(ndd),
to_current_namespace_index(ndd));
rc = nd_label_reserve_dpa(ndd);
+ if (ndd->ns_current >= 0)
+ nvdimm_set_aliasing(dev);
nvdimm_bus_unlock(dev);
if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d614493ad5ac..0eedc49e0d47 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -184,6 +184,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
return rc;
}
+void nvdimm_set_aliasing(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ nvdimm->flags |= NDD_ALIASING;
+}
+
static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 11ea90120542..6f9a6ffd7cde 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -84,18 +84,8 @@ static struct platform_driver e820_pmem_driver = {
},
};
-static __init int e820_pmem_init(void)
-{
- return platform_driver_register(&e820_pmem_driver);
-}
-
-static __exit void e820_pmem_exit(void)
-{
- platform_driver_unregister(&e820_pmem_driver);
-}
+module_platform_driver(e820_pmem_driver);
MODULE_ALIAS("platform:e820_pmem*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
-module_init(e820_pmem_init);
-module_exit(e820_pmem_exit);
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index fac7cabe8f56..dd615345699f 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -938,7 +938,7 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
}
for_each_dpa_resource(ndd, res)
- if (strncmp(res->name, "pmem", 3) == 0)
+ if (strncmp(res->name, "pmem", 4) == 0)
count++;
WARN_ON_ONCE(!count);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index abe5c6bc756c..6307088b375f 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1132,7 +1132,7 @@ static ssize_t size_show(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long)
nvdimm_namespace_capacity(to_ndns(dev)));
}
-static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
+static DEVICE_ATTR(size, 0444, size_show, size_store);
static u8 *namespace_to_uuid(struct device *dev)
{
@@ -1456,7 +1456,7 @@ static umode_t namespace_visible(struct kobject *kobj,
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
if (a == &dev_attr_size.attr)
- return S_IWUSR | S_IRUGO;
+ return 0644;
if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
return 0;
@@ -1653,7 +1653,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
u64 hw_start, hw_end, pmem_start, pmem_end;
struct nd_label_ent *label_ent;
- WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
nd_label = label_ent->label;
if (!nd_label)
@@ -1997,7 +1997,7 @@ struct device *create_namespace_blk(struct nd_region *nd_region,
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_blk *nsblk;
- char *name[NSLABEL_NAME_LEN];
+ char name[NSLABEL_NAME_LEN];
struct device *dev = NULL;
struct resource *res;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d3b2fca8deec..35dd75057e16 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -238,6 +238,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
+void nvdimm_set_aliasing(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
@@ -377,10 +378,17 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
if (!blk_queue_io_stat(disk->queue))
return false;
- __nd_iostat_start(bio, start);
+ *start = jiffies;
+ generic_start_io_acct(bio_data_dir(bio),
+ bio_sectors(bio), &disk->part0);
return true;
}
-void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline void nd_iostat_end(struct bio *bio, unsigned long start)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+ generic_end_io_acct(bio_data_dir(bio), &disk->part0, start);
+}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
{
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cea8350fbc7e..a2ac9e641aa9 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -108,7 +108,7 @@ static ssize_t align_show(struct device *dev,
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
- return sprintf(buf, "%lx\n", nd_pfn->align);
+ return sprintf(buf, "%ld\n", nd_pfn->align);
}
static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 24618431a14b..7282d7495bf1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -53,21 +53,24 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
struct device *dev = to_dev(pmem);
sector_t sector;
long cleared;
+ int rc = 0;
sector = (offset - pmem->data_offset) / 512;
- cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+ cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+ if (cleared < len)
+ rc = -EIO;
if (cleared > 0 && cleared / 512) {
- dev_dbg(dev, "%s: %#llx clear %ld sector%s\n",
- __func__, (unsigned long long) sector,
- cleared / 512, cleared / 512 > 1 ? "s" : "");
- badblocks_clear(&pmem->bb, sector, cleared / 512);
- } else {
- return -EIO;
+ cleared /= 512;
+ dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
+ (unsigned long long) sector, cleared,
+ cleared > 1 ? "s" : "");
+ badblocks_clear(&pmem->bb, sector, cleared);
}
invalidate_pmem(pmem->virt_addr + offset, len);
- return 0;
+
+ return rc;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -270,7 +273,7 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "unable to guarantee persistence of writes\n");
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 6af5e629140c..7cd705f3247c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -509,7 +509,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
struct nd_label_ent *label_ent, *e;
- WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ lockdep_assert_held(&nd_mapping->lock);
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
list_del(&label_ent->list);
kfree(label_ent);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b40cfb076f02..8a3c3e32a704 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
if (ret)
return ret;
- /* Checking for ctrl->tagset is a trick to avoid sleeping on module
- * load, since we only need the quirk on reset_controller. Notice
- * that the HGST device needs this delay only in firmware activation
- * procedure; unfortunately we have no (easy) way to verify this.
- */
- if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
return nvme_wait_ready(ctrl, cap, false);
@@ -1193,8 +1188,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->stripe_size)
- blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+ if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
@@ -1250,19 +1245,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
- unsigned int max_hw_sectors;
-
- ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
- max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
- if (ctrl->max_hw_sectors) {
- ctrl->max_hw_sectors = min(max_hw_sectors,
- ctrl->max_hw_sectors);
- } else {
- ctrl->max_hw_sectors = max_hw_sectors;
- }
- }
-
nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e761872..fcc9dcfdf675 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
struct nvme_fc_queue *queue = &ctrl->queues[1];
- int i, j, ret;
+ int i, ret;
for (i = 1; i < ctrl->queue_count; i++, queue++) {
ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
- if (ret) {
- for (j = i-1; j >= 0; j--)
- __nvme_fc_delete_hw_queue(ctrl,
- &ctrl->queues[j], j);
- return ret;
- }
+ if (ret)
+ goto delete_queues;
}
return 0;
+
+delete_queues:
+ for (; i >= 0; i--)
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+ return ret;
}
static int
@@ -1653,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
struct nvme_fc_fcp_op *op)
{
struct nvmefc_fcp_req *freq = &op->fcp_req;
- u32 map_len = nvme_map_len(rq);
enum dma_data_direction dir;
int ret;
freq->sg_cnt = 0;
- if (!map_len)
+ if (!blk_rq_payload_bytes(rq))
return 0;
freq->sg_table.sgl = freq->first_sgl;
@@ -1853,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- data_len = nvme_map_len(rq);
+ data_len = blk_rq_payload_bytes(rq);
if (data_len)
io_dir = ((rq_data_dir(rq) == WRITE) ?
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2401,8 +2401,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
WARN_ON_ONCE(!changed);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+ "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+ ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
kref_get(&ctrl->ctrl.kref);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bd5321441d12..aead6d08ed2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -135,7 +135,6 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
- u32 stripe_size;
u16 oncs;
u16 vid;
atomic_t abort_limit;
@@ -226,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline unsigned nvme_map_len(struct request *rq)
-{
- if (req_op(rq) == REQ_OP_DISCARD)
- return sizeof(struct nvme_dsm_range);
- else
- return blk_rq_bytes(rq);
-}
-
static inline void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d6e6bce93d0c..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -50,7 +50,7 @@
#define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-
+
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
-static int nvme_init_iod(struct request *rq, unsigned size,
- struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
+ unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
- int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
- int length = total_len;
+ int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- unsigned size, struct nvme_command *cmnd)
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req, size))
+ if (!nvme_setup_prps(dev, req))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
- unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
+ ret = nvme_init_iod(req, dev);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_free_cmd;
if (blk_rq_nr_phys_segments(req))
- ret = nvme_map_data(dev, req, map_len, &cmnd);
+ ret = nvme_map_data(dev, req, &cmnd);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_cleanup_iod;
@@ -712,15 +709,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
}
- /* If the controller ignores the cq head doorbell and continuously
- * writes to the queue, it is theoretically possible to wrap around
- * the queue twice and mistakenly return IRQ_NONE. Linux only
- * requires that 0.1% of your interrupts are handled, so this isn't
- * a big problem.
- */
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return;
@@ -1282,6 +1272,24 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
return true;
}
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+ /* Read a config register to help see what died. */
+ u16 pci_status;
+ int result;
+
+ result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+ &pci_status);
+ if (result == PCIBIOS_SUCCESSFUL)
+ dev_warn(dev->dev,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+ csts, pci_status);
+ else
+ dev_warn(dev->dev,
+ "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+ csts, result);
+}
+
static void nvme_watchdog_timer(unsigned long data)
{
struct nvme_dev *dev = (struct nvme_dev *)data;
@@ -1290,9 +1298,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
if (!nvme_reset(dev))
- dev_warn(dev->dev,
- "Failed status: 0x%x, reset controller.\n",
- csts);
+ nvme_warn_reset(dev, csts);
return;
}
@@ -1333,7 +1339,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
{
struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
- return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
@@ -1893,10 +1899,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!dev->bar)
goto release;
- return 0;
+ return 0;
release:
- pci_release_mem_regions(pdev);
- return -ENODEV;
+ pci_release_mem_regions(pdev);
+ return -ENODEV;
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f42ab70ffa38..557f29b1f1bb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -42,6 +42,28 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+static const char *const nvme_rdma_cm_status_strs[] = {
+ [NVME_RDMA_CM_INVALID_LEN] = "invalid length",
+ [NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format",
+ [NVME_RDMA_CM_INVALID_QID] = "invalid queue ID",
+ [NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size",
+ [NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size",
+ [NVME_RDMA_CM_NO_RSC] = "resource not found",
+ [NVME_RDMA_CM_INVALID_IRD] = "invalid IRD",
+ [NVME_RDMA_CM_INVALID_ORD] = "invalid ORD",
+};
+
+static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
+{
+ size_t index = status;
+
+ if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
+ nvme_rdma_cm_status_strs[index])
+ return nvme_rdma_cm_status_strs[index];
+ else
+ return "unrecognized reason";
+};
+
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -959,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, unsigned int map_len,
- struct nvme_command *c)
+ struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
@@ -992,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
}
if (count == 1) {
- if (rq_data_dir(rq) == WRITE &&
- map_len <= nvme_rdma_inline_data_size(queue) &&
- nvme_rdma_queue_idx(queue))
+ if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ blk_rq_payload_bytes(rq) <=
+ nvme_rdma_inline_data_size(queue))
return nvme_rdma_map_sg_inline(queue, req, c);
if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1214,16 +1235,24 @@ out_destroy_queue_ib:
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
struct rdma_cm_event *ev)
{
- if (ev->param.conn.private_data_len) {
- struct nvme_rdma_cm_rej *rej =
- (struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+ struct rdma_cm_id *cm_id = queue->cm_id;
+ int status = ev->status;
+ const char *rej_msg;
+ const struct nvme_rdma_cm_rej *rej_data;
+ u8 rej_data_len;
+
+ rej_msg = rdma_reject_msg(cm_id, status);
+ rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
+
+ if (rej_data && rej_data_len >= sizeof(u16)) {
+ u16 sts = le16_to_cpu(rej_data->sts);
dev_err(queue->ctrl->ctrl.device,
- "Connect rejected, status %d.", le16_to_cpu(rej->sts));
- /* XXX: Think of something clever to do here... */
+ "Connect rejected: status %d (%s) nvme status %d (%s).\n",
+ status, rej_msg, sts, nvme_rdma_cm_msg(sts));
} else {
dev_err(queue->ctrl->ctrl.device,
- "Connect rejected, no private data.\n");
+ "Connect rejected: status %d (%s).\n", status, rej_msg);
}
return -ECONNRESET;
@@ -1392,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
struct request *rq)
{
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
- struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
cmd->common.opcode != nvme_fabrics_command ||
@@ -1414,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_command *c = sqe->data;
bool flush = false;
struct ib_device *dev;
- unsigned int map_len;
int ret;
WARN_ON_ONCE(rq->tag < 0);
@@ -1432,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- map_len = nvme_map_len(rq);
- ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ ret = nvme_rdma_map_data(queue, rq, c);
if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index b71e95044b43..a5c09e703bd8 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
return nvme_trans_status_code(hdr, nvme_sc);
}
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
- u8 *cmd)
-{
- u8 immed, no_flush;
-
- immed = cmd[1] & 0x01;
- no_flush = cmd[4] & 0x04;
-
- if (immed != 0) {
- return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
- ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
- SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
- } else {
- if (no_flush == 0) {
- /* Issue NVME FLUSH command prior to START STOP UNIT */
- int res = nvme_trans_synchronize_cache(ns, hdr);
- if (res)
- return res;
- }
-
- return 0;
- }
-}
-
static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
case SECURITY_PROTOCOL_OUT:
retcode = nvme_trans_security_protocol(ns, hdr, cmd);
break;
- case START_STOP:
- retcode = nvme_trans_start_stop(ns, hdr, cmd);
- break;
case SYNCHRONIZE_CACHE:
retcode = nvme_trans_synchronize_cache(ns, hdr);
break;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ec1ad2aa0a4c..95ae52390478 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
- u64 val;
u32 val32;
u16 status = 0;
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
case NVME_FEAT_KATO:
- val = le64_to_cpu(req->cmd->prop_set.value);
- val32 = val & 0xffff;
+ val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
nvmet_set_result(req, req->sq->ctrl->kato);
break;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index bcb8ebeb01c5..4e8e6a22bce1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->lport = nport->lport;
nport->rport = rport;
- return ret ? ret : count;
+ return count;
}
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->lport = nport->lport;
nport->tport = tport;
- return ret ? ret : count;
+ return count;
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3fbcdb7a583c..8c3760a78ac0 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1374,6 +1374,9 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_device_removal(cm_id, queue);
break;
case RDMA_CM_EVENT_REJECTED:
+ pr_debug("Connection rejected: %s\n",
+ rdma_reject_msg(cm_id, event->status));
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d9b36a..398ea7f54826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -981,8 +981,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
* @cell: nvmem cell to be read.
* @len: pointer to length of cell which will be populated on successful read.
*
- * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
- * The buffer should be freed by the consumer with a kfree().
+ * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
+ * buffer should be freed by the consumer with a kfree().
*/
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index ac27b9bac3b9..8e7b120696fa 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -71,7 +71,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
+ { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ },
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index b5305f08b184..2bdb6c389328 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,11 +21,11 @@ static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ u8 *val = _val;
+ int i = 0, words = bytes;
while (words--)
- *val++ = readl(base + reg + (i++ * 4));
+ *val++ = readb(base + reg + i++);
return 0;
}
@@ -34,11 +34,11 @@ static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ u8 *val = _val;
+ int i = 0, words = bytes;
while (words--)
- writel(*val++, base + reg + (i++ * 4));
+ writeb(*val++, base + reg + i++);
return 0;
}
@@ -53,7 +53,7 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
.owner = THIS_MODULE,
- .stride = 4,
+ .stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
.reg_write = qfprom_reg_write,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a0bccb54a9bd..d4bea3c797d6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1534,9 +1534,12 @@ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
int i;
printk("%s %s", msg, of_node_full_name(args->np));
- for (i = 0; i < args->args_count; i++)
- printk(i ? ",%08x" : ":%08x", args->args[i]);
- printk("\n");
+ for (i = 0; i < args->args_count; i++) {
+ const char delim = i ? ',' : ':';
+
+ pr_cont("%c%08x", delim, args->args[i]);
+ }
+ pr_cont("\n");
}
int of_phandle_iterator_init(struct of_phandle_iterator *it,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 393fea85eb4e..3fda9a32defb 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np)
dev_set_msi_domain(dev,
of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
}
+EXPORT_SYMBOL_GPL(of_msi_configure);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index f63d4b0deff0..a53982a330ea 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -176,7 +176,12 @@ int of_node_to_nid(struct device_node *device)
np->name);
of_node_put(np);
- if (!r)
+ /*
+ * If numa=off passed on command line, or with a defective
+ * device tree, the nid may not be in the set of possible
+ * nodes. Check for this case and return NUMA_NO_NODE.
+ */
+ if (!r && nid < MAX_NUMNODES && node_possible(nid))
return nid;
return NUMA_NO_NODE;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index b58be12ab277..0ee42c3e66a1 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -120,6 +120,27 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
+ * of_pci_get_max_link_speed - Find the maximum link speed limit of a node
+ * @node: device tree node with the max link speed information
+ *
+ * This function looks up a property called "max-link-speed" on the given
+ * device node to find the link speed limit.
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+ u32 max_link_speed;
+
+ if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+ max_link_speed > 4)
+ return -EINVAL;
+
+ return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
+
+/**
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
* is present and valid
*/
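/*
 * Illustrative usage sketch (not part of this patch): a PCIe host-bridge
 * driver would typically call the new helper from its probe path and fall
 * back to the controller's own maximum when the property is absent or
 * invalid.  The helper name and the Gen2 fallback here are assumptions.
 */
#include <linux/of_pci.h>
#include <linux/platform_device.h>

static int example_get_link_speed(struct platform_device *pdev)
{
	int speed = of_pci_get_max_link_speed(pdev->dev.of_node);

	/* negative return: "max-link-speed" missing or out of range */
	return speed > 0 ? speed : 2;
}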
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e4bf07d20f9b..b8064bc2b6eb 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -45,6 +45,9 @@ static int of_dev_node_match(struct device *dev, void *data)
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
*
+ * Takes a reference to the embedded struct device which needs to be dropped
+ * after use.
+ *
* Returns platform_device pointer, or NULL if not found
*/
struct platform_device *of_find_device_by_node(struct device_node *np)
@@ -558,9 +561,6 @@ static int of_platform_device_destroy(struct device *dev, void *data)
* of the given device (and, recurrently, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
- *
- * Returns 0 when all children devices have been removed or
- * -EBUSY when some children remained.
*/
void of_platform_depopulate(struct device *parent)
{
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 46325d6394cf..8bf12e904fd2 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -28,20 +28,19 @@
* Find a node with the given full name by recursively following any of
* the child node links.
*/
-static struct device_node *__of_find_node_by_full_name(struct device_node *node,
+static struct device_node *find_node_by_full_name(struct device_node *node,
const char *full_name)
{
struct device_node *child, *found;
- if (node == NULL)
+ if (!node)
return NULL;
- /* check */
- if (of_node_cmp(node->full_name, full_name) == 0)
+ if (!of_node_cmp(node->full_name, full_name))
return of_node_get(node);
for_each_child_of_node(node, child) {
- found = __of_find_node_by_full_name(child, full_name);
+ found = find_node_by_full_name(child, full_name);
if (found != NULL) {
of_node_put(child);
return found;
@@ -51,16 +50,12 @@ static struct device_node *__of_find_node_by_full_name(struct device_node *node,
return NULL;
}
-/*
- * Find live tree's maximum phandle value.
- */
-static phandle of_get_tree_max_phandle(void)
+static phandle live_tree_max_phandle(void)
{
struct device_node *node;
phandle phandle;
unsigned long flags;
- /* now search recursively */
raw_spin_lock_irqsave(&devtree_lock, flags);
phandle = 0;
for_each_of_allnodes(node) {
@@ -73,131 +68,102 @@ static phandle of_get_tree_max_phandle(void)
return phandle;
}
-/*
- * Adjust a subtree's phandle values by a given delta.
- * Makes sure not to just adjust the device node's phandle value,
- * but modify the phandle properties values as well.
- */
-static void __of_adjust_tree_phandles(struct device_node *node,
+static void adjust_overlay_phandles(struct device_node *overlay,
int phandle_delta)
{
struct device_node *child;
struct property *prop;
phandle phandle;
- /* first adjust the node's phandle direct value */
- if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL)
- node->phandle += phandle_delta;
+ /* adjust node's phandle in node */
+ if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL)
+ overlay->phandle += phandle_delta;
- /* now adjust phandle & linux,phandle values */
- for_each_property_of_node(node, prop) {
+ /* copy adjusted phandle into *phandle properties */
+ for_each_property_of_node(overlay, prop) {
- /* only look for these two */
- if (of_prop_cmp(prop->name, "phandle") != 0 &&
- of_prop_cmp(prop->name, "linux,phandle") != 0)
+ if (of_prop_cmp(prop->name, "phandle") &&
+ of_prop_cmp(prop->name, "linux,phandle"))
continue;
- /* must be big enough */
if (prop->length < 4)
continue;
- /* read phandle value */
phandle = be32_to_cpup(prop->value);
- if (phandle == OF_PHANDLE_ILLEGAL) /* unresolved */
+ if (phandle == OF_PHANDLE_ILLEGAL)
continue;
- /* adjust */
- *(uint32_t *)prop->value = cpu_to_be32(node->phandle);
+ *(uint32_t *)prop->value = cpu_to_be32(overlay->phandle);
}
- /* now do the children recursively */
- for_each_child_of_node(node, child)
- __of_adjust_tree_phandles(child, phandle_delta);
+ for_each_child_of_node(overlay, child)
+ adjust_overlay_phandles(child, phandle_delta);
}
-static int __of_adjust_phandle_ref(struct device_node *node,
- struct property *rprop, int value)
+static int update_usages_of_a_phandle_reference(struct device_node *overlay,
+ struct property *prop_fixup, phandle phandle)
{
- phandle phandle;
struct device_node *refnode;
- struct property *sprop;
- char *propval, *propcur, *propend, *nodestr, *propstr, *s;
- int offset, propcurlen;
+ struct property *prop;
+ char *value, *cur, *end, *node_path, *prop_name, *s;
+ int offset, len;
int err = 0;
- /* make a copy */
- propval = kmalloc(rprop->length, GFP_KERNEL);
- if (!propval) {
- pr_err("%s: Could not copy value of '%s'\n",
- __func__, rprop->name);
+ value = kmalloc(prop_fixup->length, GFP_KERNEL);
+ if (!value)
return -ENOMEM;
- }
- memcpy(propval, rprop->value, rprop->length);
+ memcpy(value, prop_fixup->value, prop_fixup->length);
- propend = propval + rprop->length;
- for (propcur = propval; propcur < propend; propcur += propcurlen + 1) {
- propcurlen = strlen(propcur);
+ /* prop_fixup contains a list of tuples of path:property_name:offset */
+ end = value + prop_fixup->length;
+ for (cur = value; cur < end; cur += len + 1) {
+ len = strlen(cur);
- nodestr = propcur;
- s = strchr(propcur, ':');
+ node_path = cur;
+ s = strchr(cur, ':');
if (!s) {
- pr_err("%s: Illegal symbol entry '%s' (1)\n",
- __func__, propcur);
err = -EINVAL;
goto err_fail;
}
*s++ = '\0';
- propstr = s;
+ prop_name = s;
s = strchr(s, ':');
if (!s) {
- pr_err("%s: Illegal symbol entry '%s' (2)\n",
- __func__, (char *)rprop->value);
err = -EINVAL;
goto err_fail;
}
-
*s++ = '\0';
+
err = kstrtoint(s, 10, &offset);
- if (err != 0) {
- pr_err("%s: Could get offset '%s'\n",
- __func__, (char *)rprop->value);
+ if (err)
goto err_fail;
- }
- /* look into the resolve node for the full path */
- refnode = __of_find_node_by_full_name(node, nodestr);
- if (!refnode) {
- pr_warn("%s: Could not find refnode '%s'\n",
- __func__, (char *)rprop->value);
+ refnode = find_node_by_full_name(overlay, node_path);
+ if (!refnode)
continue;
- }
- /* now find the property */
- for_each_property_of_node(refnode, sprop) {
- if (of_prop_cmp(sprop->name, propstr) == 0)
+ for_each_property_of_node(refnode, prop) {
+ if (!of_prop_cmp(prop->name, prop_name))
break;
}
of_node_put(refnode);
- if (!sprop) {
- pr_err("%s: Could not find property '%s'\n",
- __func__, (char *)rprop->value);
+ if (!prop) {
err = -ENOENT;
goto err_fail;
}
- phandle = value;
- *(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
+ *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
err_fail:
- kfree(propval);
+ kfree(value);
return err;
}
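For reference, a hypothetical fixup entry in the path:property_name:offset format parsed above (the label and node names are invented for illustration):

/*
 * A "__fixups__" property for an overlay label, e.g.:
 *
 *	ethphy0 = "/fragment@0/__overlay__/ethernet:phy-handle:0";
 *
 * update_usages_of_a_phandle_reference() splits each NUL-separated entry
 * into:
 *	node_path = "/fragment@0/__overlay__/ethernet"
 *	prop_name = "phy-handle"
 *	offset    = 0
 * and writes the live tree's phandle for the symbol at that offset.
 */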
/* compare nodes taking into account that 'name' strips out the @ part */
-static int __of_node_name_cmp(const struct device_node *dn1,
+static int node_name_cmp(const struct device_node *dn1,
const struct device_node *dn2)
{
const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
@@ -208,85 +174,77 @@ static int __of_node_name_cmp(const struct device_node *dn1,
/*
* Adjust the local phandle references by the given phandle delta.
- * Assumes the existances of a __local_fixups__ node at the root.
- * Assumes that __of_verify_tree_phandle_references has been called.
- * Does not take any devtree locks so make sure you call this on a tree
- * which is at the detached state.
+ *
+ * Subtree @local_fixups, which is overlay node __local_fixups__,
+ * mirrors the fragment node structure at the root of the overlay.
+ *
+ * For each property in the fragments that contains a phandle reference,
+ * @local_fixups has a property of the same name that contains a list
+ * of offsets of the phandle reference(s) within the respective property
+ * value(s). The values at these offsets will be fixed up.
*/
-static int __of_adjust_tree_phandle_references(struct device_node *node,
- struct device_node *target, int phandle_delta)
+static int adjust_local_phandle_references(struct device_node *local_fixups,
+ struct device_node *overlay, int phandle_delta)
{
- struct device_node *child, *childtarget;
- struct property *rprop, *sprop;
+ struct device_node *child, *overlay_child;
+ struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
phandle phandle;
- if (node == NULL)
+ if (!local_fixups)
return 0;
- for_each_property_of_node(node, rprop) {
+ for_each_property_of_node(local_fixups, prop_fix) {
/* skip properties added automatically */
- if (of_prop_cmp(rprop->name, "name") == 0 ||
- of_prop_cmp(rprop->name, "phandle") == 0 ||
- of_prop_cmp(rprop->name, "linux,phandle") == 0)
+ if (!of_prop_cmp(prop_fix->name, "name") ||
+ !of_prop_cmp(prop_fix->name, "phandle") ||
+ !of_prop_cmp(prop_fix->name, "linux,phandle"))
continue;
- if ((rprop->length % 4) != 0 || rprop->length == 0) {
- pr_err("%s: Illegal property (size) '%s' @%s\n",
- __func__, rprop->name, node->full_name);
+ if ((prop_fix->length % 4) != 0 || prop_fix->length == 0)
return -EINVAL;
- }
- count = rprop->length / sizeof(__be32);
+ count = prop_fix->length / sizeof(__be32);
- /* now find the target property */
- for_each_property_of_node(target, sprop) {
- if (of_prop_cmp(sprop->name, rprop->name) == 0)
+ for_each_property_of_node(overlay, prop) {
+ if (!of_prop_cmp(prop->name, prop_fix->name))
break;
}
- if (sprop == NULL) {
- pr_err("%s: Could not find target property '%s' @%s\n",
- __func__, rprop->name, node->full_name);
+ if (!prop)
return -EINVAL;
- }
for (i = 0; i < count; i++) {
- off = be32_to_cpu(((__be32 *)rprop->value)[i]);
- /* make sure the offset doesn't overstep (even wrap) */
- if (off >= sprop->length ||
- (off + 4) > sprop->length) {
- pr_err("%s: Illegal property '%s' @%s\n",
- __func__, rprop->name,
- node->full_name);
+ off = be32_to_cpu(((__be32 *)prop_fix->value)[i]);
+ if ((off + 4) > prop->length)
return -EINVAL;
- }
-
- if (phandle_delta) {
- /* adjust */
- phandle = be32_to_cpu(*(__be32 *)(sprop->value + off));
- phandle += phandle_delta;
- *(__be32 *)(sprop->value + off) = cpu_to_be32(phandle);
- }
+
+ phandle = be32_to_cpu(*(__be32 *)(prop->value + off));
+ phandle += phandle_delta;
+ *(__be32 *)(prop->value + off) = cpu_to_be32(phandle);
}
}
- for_each_child_of_node(node, child) {
-
- for_each_child_of_node(target, childtarget)
- if (__of_node_name_cmp(child, childtarget) == 0)
+ /*
+ * These nested loops recurse down two subtrees in parallel, where the
+ * node names in the two subtrees match.
+ *
+ * The roots of the subtrees are the overlay's __local_fixups__ node
+ * and the overlay's root node.
+ */
+ for_each_child_of_node(local_fixups, child) {
+
+ for_each_child_of_node(overlay, overlay_child)
+ if (!node_name_cmp(child, overlay_child))
break;
- if (!childtarget) {
- pr_err("%s: Could not find target child '%s' @%s\n",
- __func__, child->name, node->full_name);
+ if (!overlay_child)
return -EINVAL;
- }
- err = __of_adjust_tree_phandle_references(child, childtarget,
+ err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err != 0)
+ if (err)
return err;
}
@@ -294,111 +252,103 @@ static int __of_adjust_tree_phandle_references(struct device_node *node,
}
/**
- * of_resolve - Resolve the given node against the live tree.
+ * of_resolve_phandles - Relocate and resolve overlay against live tree
*
- * @resolve: Node to resolve
+ * @overlay: Pointer to devicetree overlay to relocate and resolve
*
- * Perform dynamic Device Tree resolution against the live tree
- * to the given node to resolve. This depends on the live tree
- * having a __symbols__ node, and the resolve node the __fixups__ &
- * __local_fixups__ nodes (if needed).
- * The result of the operation is a resolve node that it's contents
- * are fit to be inserted or operate upon the live tree.
- * Returns 0 on success or a negative error value on error.
+ * Modify (relocate) values of local phandles in @overlay to a range that
+ * does not conflict with the live expanded devicetree. Update references
+ * to the local phandles in @overlay. Update (resolve) phandle references
+ * in @overlay that refer to the live expanded devicetree.
+ *
+ * Phandle values in the live tree are in the range of
+ * 1 .. live_tree_max_phandle(). The range of phandle values in the overlay
+ * also begins at 1. Adjust the phandle values in the overlay to begin at
+ * live_tree_max_phandle() + 1, and update references to the phandles to
+ * the adjusted phandle values.
+ *
+ * The name of each property in the "__fixups__" node in the overlay matches
+ * the name of a symbol (a label) in the live tree. The value of each
+ * property in the "__fixups__" node is a list of the property values in the
+ * overlay that need to be updated to contain the phandle reference
+ * corresponding to that symbol in the live tree. Update the references in
+ * the overlay with the phandle values in the live tree.
+ *
+ * @overlay must be detached.
+ *
+ * Resolving and applying @overlay to the live expanded devicetree must be
+ * protected by a mechanism to ensure that multiple overlays are processed
+ * in a single threaded manner so that multiple overlays will not relocate
+ * phandles to overlapping ranges. The mechanism to enforce this is not
+ * yet implemented.
+ *
+ * Return: %0 on success or a negative error value on error.
*/
-int of_resolve_phandles(struct device_node *resolve)
+int of_resolve_phandles(struct device_node *overlay)
{
- struct device_node *child, *childroot, *refnode;
- struct device_node *root_sym, *resolve_sym, *resolve_fix;
- struct property *rprop;
+ struct device_node *child, *local_fixups, *refnode;
+ struct device_node *tree_symbols, *overlay_fixups;
+ struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
- if (!resolve)
- pr_err("%s: null node\n", __func__);
- if (resolve && !of_node_check_flag(resolve, OF_DETACHED))
- pr_err("%s: node %s not detached\n", __func__,
- resolve->full_name);
- /* the resolve node must exist, and be detached */
- if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
- return -EINVAL;
-
- /* first we need to adjust the phandles */
- phandle_delta = of_get_tree_max_phandle() + 1;
- __of_adjust_tree_phandles(resolve, phandle_delta);
-
- /* locate the local fixups */
- childroot = NULL;
- for_each_child_of_node(resolve, childroot)
- if (of_node_cmp(childroot->name, "__local_fixups__") == 0)
- break;
-
- if (childroot != NULL) {
- /* resolve root is guaranteed to be the '/' */
- err = __of_adjust_tree_phandle_references(childroot,
- resolve, 0);
- if (err != 0)
- return err;
+ tree_symbols = NULL;
- BUG_ON(__of_adjust_tree_phandle_references(childroot,
- resolve, phandle_delta));
+ if (!overlay) {
+ pr_err("null overlay\n");
+ err = -EINVAL;
+ goto out;
+ }
+ if (!of_node_check_flag(overlay, OF_DETACHED)) {
+ pr_err("overlay not detached\n");
+ err = -EINVAL;
+ goto out;
}
- root_sym = NULL;
- resolve_sym = NULL;
- resolve_fix = NULL;
-
- /* this may fail (if no fixups are required) */
- root_sym = of_find_node_by_path("/__symbols__");
+ phandle_delta = live_tree_max_phandle() + 1;
+ adjust_overlay_phandles(overlay, phandle_delta);
- /* locate the symbols & fixups nodes on resolve */
- for_each_child_of_node(resolve, child) {
+ for_each_child_of_node(overlay, local_fixups)
+ if (!of_node_cmp(local_fixups->name, "__local_fixups__"))
+ break;
- if (!resolve_sym &&
- of_node_cmp(child->name, "__symbols__") == 0)
- resolve_sym = child;
+ err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
+ if (err)
+ goto out;
- if (!resolve_fix &&
- of_node_cmp(child->name, "__fixups__") == 0)
- resolve_fix = child;
+ overlay_fixups = NULL;
- /* both found, don't bother anymore */
- if (resolve_sym && resolve_fix)
- break;
+ for_each_child_of_node(overlay, child) {
+ if (!of_node_cmp(child->name, "__fixups__"))
+ overlay_fixups = child;
}
- /* we do allow for the case where no fixups are needed */
- if (!resolve_fix) {
- err = 0; /* no error */
+ if (!overlay_fixups) {
+ err = 0;
goto out;
}
- /* we need to fixup, but no root symbols... */
- if (!root_sym) {
- pr_err("%s: no symbols in root of device tree.\n", __func__);
+ tree_symbols = of_find_node_by_path("/__symbols__");
+ if (!tree_symbols) {
+ pr_err("no symbols in root of device tree.\n");
err = -EINVAL;
goto out;
}
- for_each_property_of_node(resolve_fix, rprop) {
+ for_each_property_of_node(overlay_fixups, prop) {
/* skip properties added automatically */
- if (of_prop_cmp(rprop->name, "name") == 0)
+ if (!of_prop_cmp(prop->name, "name"))
continue;
- err = of_property_read_string(root_sym,
- rprop->name, &refpath);
- if (err != 0) {
- pr_err("%s: Could not find symbol '%s'\n",
- __func__, rprop->name);
+ err = of_property_read_string(tree_symbols,
+ prop->name, &refpath);
+ if (err)
goto out;
- }
refnode = of_find_node_by_path(refpath);
if (!refnode) {
- pr_err("%s: Could not find node by path '%s'\n",
- __func__, refpath);
err = -ENOENT;
goto out;
}
@@ -406,17 +356,15 @@ int of_resolve_phandles(struct device_node *resolve)
phandle = refnode->phandle;
of_node_put(refnode);
- pr_debug("%s: %s phandle is 0x%08x\n",
- __func__, rprop->name, phandle);
-
- err = __of_adjust_phandle_ref(resolve, rprop, phandle);
+ err = update_usages_of_a_phandle_reference(overlay, prop, phandle);
if (err)
break;
}
out:
- /* NULL is handled by of_node_put as NOP */
- of_node_put(root_sym);
+ if (err)
+ pr_err("overlay phandle fixup failed: %d\n", err);
+ of_node_put(tree_symbols);
return err;
}
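A hedged sketch (not part of this diff) of the calling sequence implied by the comment above; the caller-side serialization is assumed, since the enforcing mechanism is noted as not yet implemented.

#include <linux/errno.h>
#include <linux/of.h>

/* Illustrative: resolve a detached overlay tree before applying it. */
static int example_prepare_overlay(struct device_node *overlay)
{
	int err;

	/* of_resolve_phandles() requires a detached overlay root. */
	if (!of_node_check_flag(overlay, OF_DETACHED))
		return -EINVAL;

	/*
	 * Assumed: the caller serializes this against other overlay
	 * operations so two overlays never relocate phandles into the
	 * same range.
	 */
	err = of_resolve_phandles(overlay);
	if (err)
		return err;

	/* The overlay is now fit to be applied to the live tree. */
	return 0;
}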
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 82f7000a285d..642478d35e99 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -206,7 +206,7 @@ void sync_stop(void)
* because we cannot reach this code without at least one
* dcookie user still being registered (namely, the reader
* of the event buffer). */
-static inline unsigned long fast_get_dcookie(struct path *path)
+static inline unsigned long fast_get_dcookie(const struct path *path)
{
unsigned long cookie;
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index c0cc4e7ff023..67935fbbbcab 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -18,7 +18,7 @@
#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "oprof.h"
#include "event_buffer.h"
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 134398e0231b..d77ebbfc67c9 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -15,7 +15,7 @@
#include <linux/oprofile.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "oprof.h"
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 3ed6238f8f6e..553ef8a5d588 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -48,7 +48,7 @@
#include <asm/byteorder.h>
#include <asm/cache.h> /* for L1_CACHE_BYTES */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
diff --git a/drivers/parisc/ccio-rm-dma.c b/drivers/parisc/ccio-rm-dma.c
index f78f6f1aef47..1bf988010855 100644
--- a/drivers/parisc/ccio-rm-dma.c
+++ b/drivers/parisc/ccio-rm-dma.c
@@ -40,7 +40,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hardware.h>
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 783906fe659a..4dd9b1308128 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/eisa_eeprom.h>
#define EISA_EEPROM_MINOR 241
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index 21905fef2cbf..d9bffe8d29b9 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/eisa_bus.h>
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index b48243131993..ff1a332d76e4 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -49,7 +49,7 @@
#include <asm/param.h> /* HZ */
#include <asm/led.h>
#include <asm/pdc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* The control of the LEDs and LCDs on PARISC-machines have to be done
completely in software. The necessary calculations are done in a work queue
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 3651c3871d5b..055f83fddc18 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -68,7 +68,7 @@
#include <asm/pdc.h>
#include <asm/page.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/hardware.h>
#define PDCS_VERSION "0.30"
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5bed17f68ef4..d998d0ed2bec 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#undef DEBUG
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index 2e21af43d91e..c0e7d21c88c2 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -18,7 +18,7 @@
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#undef DEBUG /* undef me for production */
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 6e3a60c78873..dd6d4ccb41e4 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -34,7 +34,7 @@
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/superio.h>
#include <linux/parport.h>
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index d763bc9e44c1..4d1d6eaf333d 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -10,7 +10,7 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static const struct {
const char *token;
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 74ed3e459a3e..8ee44a104ac4 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -23,7 +23,7 @@
#include <linux/sysctl.h>
#include <linux/device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb8fba3..db239547fefd 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -142,10 +142,22 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
if (size == 4) {
writel(val, addr);
return PCIBIOS_SUCCESSFUL;
- } else {
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
}
+ /*
+ * In general, hardware that supports only 32-bit writes on PCI is
+ * not spec-compliant. For example, software may perform a 16-bit
+ * write. If the hardware only supports 32-bit accesses, we must
+ * do a 32-bit read, merge in the 16 bits we intend to write,
+ * followed by a 32-bit write. If the 16 bits we *don't* intend to
+ * write happen to have any RW1C (write-one-to-clear) bits set, we
+ * just inadvertently cleared something we shouldn't have.
+ */
+ dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
tmp = readl(addr) & mask;
tmp |= val << ((where & 0x3) * 8);
writel(tmp, addr);
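To make the merge arithmetic concrete, a worked example with illustrative values for a 16-bit write of 0xBEEF at config offset 0x42:

/*
 * size = 2, where = 0x42, val = 0xBEEF
 *
 * mask = ~(((1 << (2 * 8)) - 1) << ((0x42 & 0x3) * 8))
 *      = ~(0x0000ffff << 16)
 *      = 0x0000ffff
 *
 * tmp  = readl(addr) & 0x0000ffff;	keep the low 16 bits as read
 * tmp |= 0xBEEF << 16;			merge the new upper 16 bits
 * writel(tmp, addr);			one 32-bit write touches all 32 bits,
 *					including any RW1C bits in [15:0]
 */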
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index c288e5a52575..bc56cf19afd3 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -320,7 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 43ed08dd8b01..2fee61bb6559 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -162,3 +162,15 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
.write = pci_generic_config_write,
}
};
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+/* ECAM ops for 32-bit access only (non-compliant) */
+struct pci_ecam_ops pci_32b_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ }
+};
+#endif
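Illustratively (not a quote from the patch), selecting pci_32b_ops for a host bridge funnels every config access through the 32-bit-only accessors:

/*
 * pci_bus_read_config_word(bus, devfn, PCI_COMMAND, &cmd);
 *	-> pci_generic_config_read32(): one 32-bit readl(), then the
 *	   requested 16 bits are extracted
 *
 * pci_bus_write_config_word(bus, devfn, PCI_COMMAND, cmd);
 *	-> pci_generic_config_write32(): readl()/merge/writel(), subject
 *	   to the RW1C caveat warned about in access.c above
 */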
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d7e7c0a827c3..898d2c48239c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,7 +69,7 @@ config PCI_IMX6
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA && !ARM64
+ depends on ARCH_TEGRA
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -133,8 +133,8 @@ config PCIE_XILINX
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARCH_XGENE
- depends on OF
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
@@ -240,14 +240,16 @@ config PCIE_QCOM
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
@@ -276,7 +278,7 @@ config PCIE_ARTPEC6
config PCIE_ROCKCHIP
bool "Rockchip PCIe controller"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
@@ -286,7 +288,7 @@ config PCIE_ROCKCHIP
4 slots.
config VMD
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
default N
---help---
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb4983645..bfe3179ae74c 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
-obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
@@ -25,11 +24,23 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
-obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
-obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
-obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_VMD) += vmd.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
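The per-driver layout the comment describes follows roughly this skeleton (CONFIG_PCI_HOST_FOO is a placeholder; the real guards appear in the driver diffs below):

#if defined(CONFIG_PCI_HOST_FOO) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
/* Config accessors and pci_ecam_ops shared by the DT driver and the MCFG quirk. */

#ifdef CONFIG_PCI_HOST_FOO
/* DT-only part: of_device_id table, probe, builtin_platform_driver(). */
#endif

#endif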
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 763ff8745828..3efcc7bdc5fb 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -378,6 +378,8 @@ struct hv_pcibus_device {
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
struct irq_domain *irq_domain;
+ struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ spinlock_t retarget_msi_interrupt_lock;
};
/*
@@ -755,7 +757,7 @@ static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
return parent->chip->irq_set_affinity(parent, dest, force);
}
-void hv_irq_mask(struct irq_data *data)
+static void hv_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
}
@@ -770,38 +772,44 @@ void hv_irq_mask(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-void hv_irq_unmask(struct irq_data *data)
+static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt params;
+ struct retarget_msi_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
struct pci_bus *pbus;
struct pci_dev *pdev;
int cpu;
+ unsigned long flags;
dest = irq_data_get_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
- memset(&params, 0, sizeof(params));
- params.partition_id = HV_PARTITION_ID_SELF;
- params.source = 1; /* MSI(-X) */
- params.address = msi_desc->msg.address_lo;
- params.data = msi_desc->msg.data;
- params.device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+ params = &hbus->retarget_msi_interrupt_params;
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->source = 1; /* MSI(-X) */
+ params->address = msi_desc->msg.address_lo;
+ params->data = msi_desc->msg.data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params.vector = cfg->vector;
+ params->vector = cfg->vector;
for_each_cpu_and(cpu, dest, cpu_online_mask)
- params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
- hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, &params, NULL);
+ hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
pci_msi_unmask_irq(data);
}
@@ -1271,9 +1279,9 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
struct q_res_req_compl comp_pkt;
- union {
- struct pci_packet init_packet;
- u8 buffer[0x100];
+ struct {
+ struct pci_packet init_packet;
+ u8 buffer[sizeof(struct pci_child_message)];
} pkt;
unsigned long flags;
int ret;
@@ -1582,6 +1590,10 @@ static void hv_eject_device_work(struct work_struct *work)
pci_dev_put(pdev);
}
+ spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
@@ -1590,10 +1602,6 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
- list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
-
put_pcichild(hpdev, hv_pcidev_ref_childlist);
put_pcichild(hpdev, hv_pcidev_ref_pnp);
put_hvpcibus(hpdev->hbus);
@@ -2186,6 +2194,7 @@ static int hv_pci_probe(struct hv_device *hdev,
INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
+ spin_lock_init(&hbus->retarget_msi_interrupt_lock);
sema_init(&hbus->enum_sem, 1);
init_completion(&hbus->remove_event);
@@ -2266,24 +2275,32 @@ free_bus:
return ret;
}
-/**
- * hv_pci_remove() - Remove routine for this VMBus channel
- * @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
- */
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_bus_exit(struct hv_device *hdev)
{
- int ret;
- struct hv_pcibus_device *hbus;
- union {
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct {
struct pci_packet teardown_packet;
- u8 buffer[0x100];
+ u8 buffer[sizeof(struct pci_message)];
} pkt;
struct pci_bus_relations relations;
struct hv_pci_compl comp_pkt;
+ int ret;
- hbus = hv_get_drvdata(hdev);
+ /*
+ * After the host sends the RESCIND_CHANNEL message, it doesn't
+ * access the per-channel ringbuffer any longer.
+ */
+ if (hdev->channel->rescind)
+ return;
+
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -2298,7 +2315,19 @@ static int hv_pci_remove(struct hv_device *hdev)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (!ret)
wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus;
+ hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
@@ -2307,17 +2336,10 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
- ret = hv_send_resources_released(hdev);
- if (ret)
- dev_err(&hdev->device,
- "Couldn't send resources released packet(s)\n");
+ hv_pci_bus_exit(hdev);
vmbus_close(hdev->channel);
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
-
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
pci_free_resource_list(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 653707996342..ea789138531b 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -35,12 +35,10 @@
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
-/* PEX LUT registers */
-#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
-
struct ls_pcie_drvdata {
u32 lut_offset;
u32 ltssm_shift;
+ u32 lut_dbg;
struct pcie_host_ops *ops;
};
@@ -134,7 +132,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 state;
- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
pcie->drvdata->ltssm_shift) &
LTSSM_STATE_MASK;
@@ -196,18 +194,28 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
static struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
+ .lut_dbg = 0x7fc,
+ .ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1046_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 24,
+ .lut_dbg = 0x407fc,
.ops = &ls_pcie_host_ops,
};
static struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
+ .lut_dbg = 0x7fc,
.ops = &ls_pcie_host_ops,
};
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
@@ -252,10 +260,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pcie->pp.dbi_base)) {
- dev_err(dev, "missing *regs* space\n");
+ if (IS_ERR(pcie->pp.dbi_base))
return PTR_ERR(pcie->pp.dbi_base);
- }
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 1eeefa4df64c..85348590848b 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -430,10 +430,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
}
static struct of_device_id rcar_pci_of_match[] = {
- { .compatible = "renesas,pci-rcar-gen2", },
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
+ { .compatible = "renesas,pci-rcar-gen2", },
{ },
};
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8dfccf733241..ed8a93f2bfb5 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -51,10 +51,6 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-#include <asm/mach/pci.h>
-
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
@@ -188,6 +184,9 @@
#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_CTL2 0x00000fa8
+#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
#define RP_PRIV_MISC 0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
@@ -252,6 +251,7 @@ struct tegra_pcie_soc {
bool has_intr_prsnt_sense;
bool has_cml_clk;
bool has_gen2;
+ bool force_pca_enable;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -322,11 +322,6 @@ struct tegra_pcie_bus {
unsigned int nr;
};
-static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -385,8 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
- pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
+ pgprot_t prot = pgprot_device(PAGE_KERNEL);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -430,7 +424,8 @@ free:
static int tegra_pcie_add_bus(struct pci_bus *bus)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *b;
b = tegra_pcie_bus_alloc(pcie, bus->number);
@@ -444,7 +439,8 @@ static int tegra_pcie_add_bus(struct pci_bus *bus)
static void tegra_pcie_remove_bus(struct pci_bus *child)
{
- struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(child);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *bus, *tmp;
list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
@@ -461,7 +457,8 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
@@ -558,6 +555,12 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
afi_writel(port->pcie, value, ctrl);
tegra_pcie_port_reset(port);
+
+ if (soc->force_pca_enable) {
+ value = readl(port->base + RP_VEND_CTL2);
+ value |= RP_VEND_CTL2_PCA_ENABLE;
+ writel(value, port->base + RP_VEND_CTL2);
+ }
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -610,39 +613,31 @@ static void tegra_pcie_relax_enable(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
-static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
- struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
struct device *dev = pcie->dev;
int err;
- sys->mem_offset = pcie->offset.mem;
- sys->io_offset = pcie->offset.io;
+ pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+ pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+ pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
+ pci_add_resource(windows, &pcie->busn);
- err = devm_request_resource(dev, &iomem_resource, &pcie->io);
+ err = devm_request_pci_bus_resources(dev, windows);
if (err < 0)
return err;
- err = pci_remap_iospace(&pcie->pio, pcie->io.start);
- if (!err)
- pci_add_resource_offset(&sys->resources, &pcie->pio,
- sys->io_offset);
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
- pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
- pci_add_resource_offset(&sys->resources, &pcie->prefetch,
- sys->mem_offset);
- pci_add_resource(&sys->resources, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, &sys->resources);
- if (err < 0)
- return err;
-
- return 1;
+ return 0;
}
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
- struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
int irq;
tegra_cpuidle_pcie_irqs_in_use();
@@ -1499,10 +1494,11 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
- struct device *dev = pcie->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct platform_device *pdev = to_platform_device(pcie->dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
unsigned long base;
int err;
u32 reg;
@@ -1559,6 +1555,8 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
reg |= AFI_INTR_MASK_MSI_MASK;
afi_writel(pcie, reg, AFI_INTR_MASK);
+ host->msi = &msi->chip;
+
return 0;
err:
@@ -1609,7 +1607,8 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1730,7 +1729,22 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ pcie->num_supplies = 6;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pll-uerefe";
+ pcie->supplies[i++].supply = "hvddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "dvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
@@ -2021,11 +2035,10 @@ retry:
return false;
}
-static int tegra_pcie_enable(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
- struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(dev, "probing port %u, using %u lanes\n",
@@ -2041,21 +2054,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
}
-
- memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
- hw.msi_ctrl = &pcie->msi.chip;
-#endif
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = tegra_pcie_setup;
- hw.map_irq = tegra_pcie_map_irq;
- hw.ops = &tegra_pcie_ops;
-
- pci_common_init_dev(dev, &hw);
- return 0;
}
static const struct tegra_pcie_soc tegra20_pcie = {
@@ -2069,6 +2067,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_intr_prsnt_sense = false,
.has_cml_clk = false,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2083,6 +2082,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2096,9 +2096,25 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = true,
+ .force_pca_enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+ .num_ports = 2,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x90b890b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+ .force_pca_enable = true,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
@@ -2217,13 +2233,17 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *host;
struct tegra_pcie *pcie;
+ struct pci_bus *child;
int err;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ host = pci_alloc_host_bridge(sizeof(*pcie));
+ if (!host)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(host);
+
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
@@ -2243,6 +2263,10 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
+ err = tegra_pcie_request_resources(pcie);
+ if (err)
+ goto put_resources;
+
/* setup the AFI address translations */
tegra_pcie_setup_translations(pcie);
@@ -2254,12 +2278,30 @@ static int tegra_pcie_probe(struct platform_device *pdev)
}
}
- err = tegra_pcie_enable(pcie);
+ tegra_pcie_enable_ports(pcie);
+
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = &pdev->dev;
+ host->ops = &tegra_pcie_ops;
+
+ err = pci_register_host_bridge(host);
if (err < 0) {
- dev_err(dev, "failed to enable PCIe ports: %d\n", err);
+ dev_err(dev, "failed to register host: %d\n", err);
goto disable_msi;
}
+ pci_scan_child_bus(host->bus);
+
+ pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
+ pci_bus_size_bridges(host->bus);
+ pci_bus_assign_resources(host->bus);
+
+ list_for_each_entry(child, &host->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(host->bus);
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d50a3dc2d8db..3f54a43bbbea 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
@@ -346,7 +348,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static struct pci_ecam_ops pci_thunder_ecam_ops = {
+struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -355,6 +357,8 @@ static struct pci_ecam_ops pci_thunder_ecam_ops = {
}
};
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
static const struct of_device_id thunder_ecam_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-ecam" },
{ },
@@ -373,3 +377,6 @@ static struct platform_driver thunder_ecam_driver = {
.probe = thunder_ecam_probe,
};
builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6abaf80ffb39..af722eb0ca75 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -18,8 +18,12 @@
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
@@ -284,35 +288,16 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct pci_config_window *cfg)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
{
- struct device *dev = cfg->parent;
- resource_size_t bar4_start;
- struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
- struct platform_device *pdev;
-
- /* Only OF support for now */
- if (!dev->of_node)
- return -EINVAL;
+ resource_size_t bar4_start;
pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
if (!pem_pci)
return -ENOMEM;
- pdev = to_platform_device(dev);
-
- /*
- * The second register range is the PEM bridge to the PCIe
- * bus. It has a different config access method than those
- * devices behind the bridge.
- */
- res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_pem) {
- dev_err(dev, "missing \"reg[1]\"property\n");
- return -EINVAL;
- }
-
pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
if (!pem_pci->pem_reg_base)
return -ENOMEM;
@@ -332,9 +317,69 @@ static int thunder_pem_init(struct pci_config_window *cfg)
return 0;
}
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res_pem;
+ int ret;
+
+ res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+ if (!res_pem)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return ret;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
+struct pci_ecam_ops thunder_pem_ecam_ops = {
+ .bus_shift = 24,
+ .init = thunder_pem_acpi_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
static struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
- .init = thunder_pem_init,
+ .init = thunder_pem_platform_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = thunder_pem_config_read,
@@ -360,3 +405,6 @@ static struct platform_driver thunder_pem_driver = {
.probe = thunder_pem_probe,
};
builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1de23d74783f..7c3b54b9eb17 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -27,6 +27,8 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -64,7 +66,9 @@
/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
+#define XGENE_PCIE_IP_VER_2 2
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie_port {
struct device_node *node;
struct device *dev;
@@ -91,13 +95,24 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
+static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
+
+ if (acpi_disabled)
+ return (struct xgene_pcie_port *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct xgene_pcie_port *)(cfg->priv);
+}
+
/*
* When the address bit [17:16] is 2'b01, the Configuration access will be
* treated as Type 1 and it will be forwarded to external PCIe device.
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -111,7 +126,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
unsigned int b, d, f;
u32 rtdid_val = 0;
@@ -158,7 +173,7 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
PCIBIOS_SUCCESSFUL)
@@ -182,13 +197,103 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
+#endif
-static struct pci_ops xgene_pcie_ops = {
- .map_bus = xgene_pcie_map_bus,
- .read = xgene_pcie_config_read32,
- .write = pci_generic_config_write32,
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+ struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct xgene_pcie_port *port;
+ struct resource csr;
+ int ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = xgene_get_csr_resource(adev, &csr);
+ if (ret) {
+ dev_err(dev, "can't get CSR resource\n");
+ kfree(port);
+ return ret;
+ }
+ port->csr_base = devm_ioremap_resource(dev, &csr);
+ if (IS_ERR(port->csr_base)) {
+ kfree(port);
+ return -ENOMEM;
+ }
+
+ port->cfg_base = cfg->win;
+ port->version = ipversion;
+
+ cfg->priv = port;
+ return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v1_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v2_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
};
+#endif
+#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
@@ -521,6 +626,12 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
return 0;
}
+static struct pci_ops xgene_pcie_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write32,
+};
+
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -591,3 +702,4 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe_bridge,
};
builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b0ac4dfafa0b..0c1540225ca3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -550,10 +550,8 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
pcie->cra_base = devm_ioremap_resource(dev, cra);
- if (IS_ERR(pcie->cra_base)) {
- dev_err(dev, "failed to map cra memory\n");
+ if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- }
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
@@ -641,8 +639,4 @@ static struct platform_driver altera_pcie_driver = {
},
};
-static int altera_pcie_init(void)
-{
- return platform_driver_register(&altera_pcie_driver);
-}
-device_initcall(altera_pcie_init);
+builtin_platform_driver(altera_pcie_driver);
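
The builtin_platform_driver() conversion above (and the matching one for spear13xx later in this diff) drops the open-coded initcall wrapper without changing behaviour; the macro expands to roughly what was removed:

/* roughly what builtin_platform_driver(altera_pcie_driver) generates */
static int __init altera_pcie_driver_init(void)
{
	return platform_driver_register(&altera_pcie_driver);
}
device_initcall(altera_pcie_driver_init);
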
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 56154c25980c..a301a7187b30 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -18,7 +18,106 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/regmap.h>
+#include "../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_read32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_write32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ void __iomem *reg_base = cfg->priv;
+
+ if (bus->number == cfg->busr.start)
+ return reg_base + where;
+ else
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res;
+ void __iomem *reg_base;
+ int ret;
+
+ /*
+ * Retrieve RC base and size from a HISI0081 device with _UID
+ * matching our segment.
+ */
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return -ENOMEM;
+ }
+
+ reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!reg_base)
+ return -ENOMEM;
+
+ cfg->priv = reg_base;
+ return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+ .bus_shift = 20,
+ .init = hisi_pcie_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_acpi_rd_conf,
+ .write = hisi_pcie_acpi_wr_conf,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
#include "pcie-designware.h"
@@ -185,17 +284,13 @@ static int hisi_pcie_probe(struct platform_device *pdev)
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
pp->dbi_base = devm_ioremap_resource(dev, reg);
- if (IS_ERR(pp->dbi_base)) {
- dev_err(dev, "cannot get rc_dbi base\n");
+ if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
- }
ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return 0;
}
@@ -227,3 +322,5 @@ static struct platform_driver hisi_pcie_driver = {
},
};
builtin_platform_driver(hisi_pcie_driver);
+
+#endif
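
The HiSilicon ACPI accessors above filter root-bus accesses by slot so that only one device appears under each root port. PCI_SLOT() and PCI_FUNC() simply unpack devfn; a small standalone demo of the filter decision, with the macro bodies copied from the uapi definitions:

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int devfn;

	/* the root-bus check above forwards only slot 0, any function */
	for (devfn = 0; devfn < 32; devfn += 8)
		printf("devfn %2u -> slot %u func %u -> %s\n",
		       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn),
		       PCI_SLOT(devfn) ? "rejected" : "forwarded");
	return 0;
}
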
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce089043a27..bd4c9ec25edc 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -54,6 +54,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->dev = dev;
+ pcie->type = IPROC_PCIE_PAXB_BCMA;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 9a2973bdc78a..9fad7915f82a 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -563,6 +563,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
}
switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
case IPROC_PCIE_PAXB:
msi->reg_offsets = iproc_msi_reg_paxb;
msi->nr_eq_region = 1;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087976b3..22d814a78a78 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -31,8 +31,14 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
.compatible = "brcm,iproc-pcie",
.data = (int *)IPROC_PCIE_PAXB,
}, {
+ .compatible = "brcm,iproc-pcie-paxb-v2",
+ .data = (int *)IPROC_PCIE_PAXB_V2,
+ }, {
.compatible = "brcm,iproc-pcie-paxc",
.data = (int *)IPROC_PCIE_PAXC,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxc-v2",
+ .data = (int *)IPROC_PCIE_PAXC_V2,
},
{ /* sentinel */ }
};
@@ -84,19 +90,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
pcie->ob.axi_offset = val;
-
- ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
- &val);
- if (ret) {
- dev_err(dev,
- "missing brcm,pcie-ob-window-size property\n");
- return ret;
- }
- pcie->ob.window_size = (resource_size_t)val * SZ_1M;
-
- if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
- pcie->ob.set_oarr_size = true;
-
pcie->need_ob_cfg = true;
}
@@ -115,7 +108,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->map_irq = of_irq_parse_and_map_pci;
+ /* PAXC doesn't support legacy IRQs, skip mapping */
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXC:
+ case IPROC_PCIE_PAXC_V2:
+ break;
+ default:
+ pcie->map_irq = of_irq_parse_and_map_pci;
+ }
ret = iproc_pcie_setup(pcie, &res);
if (ret)
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0b999a9fb843..3ebc025499b9 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -38,6 +39,12 @@
#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK 0x7f
+#define GIC_V3_CFG_SHIFT 0
+#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT 0
+#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
+
#define CFG_IND_ADDR_MASK 0x00001ffc
#define CFG_ADDR_BUS_NUM_SHIFT 20
@@ -58,59 +65,319 @@
#define PCIE_DL_ACTIVE_SHIFT 2
#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+#define APB_ERR_EN_SHIFT 0
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES 4
+
#define OARR_VALID_SHIFT 0
#define OARR_VALID BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT 1
-#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
-#define PCI_EXP_CAP 0xac
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES 9
+
+#define IMAP_VALID_SHIFT 0
+#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
-#define MAX_NUM_OB_WINDOWS 2
+#define PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
+/**
+ * iProc PCIe outbound mapping controller specific parameters
+ *
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+ resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+ unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR2/OMAP2 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+ {
+ /* OARR3/OMAP3 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+};
+
+/**
+ * iProc PCIe inbound mapping type
+ */
+enum iproc_pcie_ib_map_type {
+ /* for DDR memory */
+ IPROC_PCIE_IB_MAP_MEM = 0,
+
+ /* for device I/O memory */
+ IPROC_PCIE_IB_MAP_IO,
+
+ /* invalid or unused */
+ IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * iProc PCIe inbound mapping controller specific parameters
+ *
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+ enum iproc_pcie_ib_map_type type;
+ unsigned int size_unit;
+ resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+ unsigned int nr_sizes;
+ unsigned int nr_windows;
+ u16 imap_addr_offset;
+ u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+ {
+ /* IARR0/IMAP0 */
+ .type = IPROC_PCIE_IB_MAP_IO,
+ .size_unit = SZ_1K,
+ .region_sizes = { 32 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x40,
+ .imap_window_offset = 0x4,
+ },
+ {
+ /* IARR1/IMAP1 (currently unused) */
+ .type = IPROC_PCIE_IB_MAP_INVALID,
+ },
+ {
+ /* IARR2/IMAP2 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+ 16384 },
+ .nr_sizes = 9,
+ .nr_windows = 1,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR3/IMAP3 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 1, 2, 4, 8, 16, 32 },
+ .nr_sizes = 6,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR4/IMAP4 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 32, 64, 128, 256, 512 },
+ .nr_sizes = 5,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+};
+
+/*
+ * iProc PCIe host registers
+ */
enum iproc_pcie_reg {
+ /* clock/reset signal control */
IPROC_PCIE_CLK_CTRL = 0,
+
+ /*
+ * To allow MSI to be steered to an external MSI controller (e.g., ARM
+ * GICv3 ITS)
+ */
+ IPROC_PCIE_MSI_GIC_MODE,
+
+ /*
+ * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+ * window where the MSI posted writes are written, so that the writes are
+ * interpreted as MSI writes.
+ */
+ IPROC_PCIE_MSI_BASE_ADDR,
+ IPROC_PCIE_MSI_WINDOW_SIZE,
+
+ /*
+ * To hold the address of the register where the MSI writes are
+ * programmed. When ARM GICv3 ITS is used, this should be programmed
+ * with the address of the GITS_TRANSLATER register.
+ */
+ IPROC_PCIE_MSI_ADDR_LO,
+ IPROC_PCIE_MSI_ADDR_HI,
+
+ /* enable MSI */
+ IPROC_PCIE_MSI_EN_CFG,
+
+ /* allow access to root complex configuration space */
IPROC_PCIE_CFG_IND_ADDR,
IPROC_PCIE_CFG_IND_DATA,
+
+ /* allow access to device configuration space */
IPROC_PCIE_CFG_ADDR,
IPROC_PCIE_CFG_DATA,
+
+ /* enable INTx */
IPROC_PCIE_INTX_EN,
- IPROC_PCIE_OARR_LO,
- IPROC_PCIE_OARR_HI,
- IPROC_PCIE_OMAP_LO,
- IPROC_PCIE_OMAP_HI,
+
+ /* outbound address mapping */
+ IPROC_PCIE_OARR0,
+ IPROC_PCIE_OMAP0,
+ IPROC_PCIE_OARR1,
+ IPROC_PCIE_OMAP1,
+ IPROC_PCIE_OARR2,
+ IPROC_PCIE_OMAP2,
+ IPROC_PCIE_OARR3,
+ IPROC_PCIE_OMAP3,
+
+ /* inbound address mapping */
+ IPROC_PCIE_IARR0,
+ IPROC_PCIE_IMAP0,
+ IPROC_PCIE_IARR1,
+ IPROC_PCIE_IMAP1,
+ IPROC_PCIE_IARR2,
+ IPROC_PCIE_IMAP2,
+ IPROC_PCIE_IARR3,
+ IPROC_PCIE_IMAP3,
+ IPROC_PCIE_IARR4,
+ IPROC_PCIE_IMAP4,
+
+ /* link status */
IPROC_PCIE_LINK_STATUS,
+
+ /* enable APB error for unsupported requests */
+ IPROC_PCIE_APB_ERR_EN,
+
+ /* total number of core registers */
+ IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
};
/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_OARR_LO] = 0xd20,
- [IPROC_PCIE_OARR_HI] = 0xd24,
- [IPROC_PCIE_OMAP_LO] = 0xd40,
- [IPROC_PCIE_OMAP_HI] = 0xd44,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_OARR2] = 0xd60,
+ [IPROC_PCIE_OMAP2] = 0xd68,
+ [IPROC_PCIE_OARR3] = 0xdf0,
+ [IPROC_PCIE_OMAP3] = 0xdf8,
+ [IPROC_PCIE_IARR0] = 0xd00,
+ [IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR2] = 0xd10,
+ [IPROC_PCIE_IMAP2] = 0xcc0,
+ [IPROC_PCIE_IARR3] = 0xe00,
+ [IPROC_PCIE_IMAP3] = 0xe08,
+ [IPROC_PCIE_IARR4] = 0xe68,
+ [IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
- [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[] = {
+ [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
+ [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
+ [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
+ [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
+ [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
+ [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
};
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
@@ -159,16 +426,26 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
writel(val, pcie->base + offset);
}
-static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
- enum iproc_pcie_reg reg,
- unsigned window, u32 val)
+/**
+ * APB error forwarding can be disabled during access of configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration with multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+ bool disable)
{
- u16 offset = iproc_pcie_reg_offset(pcie, reg);
-
- if (iproc_pcie_reg_is_invalid(offset))
- return;
+ struct iproc_pcie *pcie = iproc_data(bus);
+ u32 val;
- writel(val, pcie->base + offset + (window * 8));
+ if (bus->number && pcie->has_apb_err_disable) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+ if (disable)
+ val &= ~APB_ERR_EN;
+ else
+ val |= APB_ERR_EN;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+ }
}
/**
@@ -204,7 +481,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
* PAXC is connected to an internally emulated EP within the SoC. It
* allows only one device.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
if (slot > 0)
return NULL;
@@ -222,26 +499,47 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
return (pcie->base + offset);
}
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_write32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
static struct pci_ops iproc_pcie_ops = {
.map_bus = iproc_pcie_map_cfg_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = iproc_pcie_config_read32,
+ .write = iproc_pcie_config_write32,
};
static void iproc_pcie_reset(struct iproc_pcie *pcie)
{
u32 val;
- if (pcie->type == IPROC_PCIE_PAXC) {
- val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
- val &= ~PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
- val |= PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
+ /*
+ * PAXC and the internal emulated endpoint device downstream should not
+ * be reset. If firmware has been loaded on the endpoint device at an
+ * earlier boot stage, reset here causes issues.
+ */
+ if (pcie->ep_is_internal)
return;
- }
/*
* Select perst_b signal as reset source. Put the device into reset,
@@ -270,7 +568,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
* PAXC connects to emulated endpoint devices directly and does not
* have a Serdes. Therefore skip the link detection logic here.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
return 0;
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
@@ -334,6 +632,58 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+ int window_idx)
+{
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+ return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+ int size_idx, u64 axi_addr, u64 pci_addr)
+{
+ struct device *dev = pcie->dev;
+ u16 oarr_offset, omap_offset;
+
+ /*
+ * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+ * on window index.
+ */
+ oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+ window_idx));
+ omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+ window_idx));
+ if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+ iproc_pcie_reg_is_invalid(omap_offset))
+ return -EINVAL;
+
+ /*
+ * Program the OARR registers. The upper 32-bit OARR register is
+ * always right after the lower 32-bit OARR register.
+ */
+ writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+ OARR_VALID, pcie->base + oarr_offset);
+ writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+ /* now program the OMAP registers */
+ writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+ dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
+
+ return 0;
+}
+
/**
* Some iProc SoCs require the SW to configure the outbound address mapping
*
@@ -350,24 +700,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
- unsigned i;
- u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
- u64 remainder;
-
- if (size > max_size) {
- dev_err(dev,
- "res size %pap exceeds max supported size 0x%llx\n",
- &size, max_size);
- return -EINVAL;
- }
-
- div64_u64_rem(size, ob->window_size, &remainder);
- if (remainder) {
- dev_err(dev,
- "res size %pap needs to be multiple of window size %pap\n",
- &size, &ob->window_size);
- return -EINVAL;
- }
+ int ret = -EINVAL, window_idx, size_idx;
if (axi_addr < ob->axi_offset) {
dev_err(dev, "axi address %pap less than offset %pap\n",
@@ -381,26 +714,70 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
*/
axi_addr -= ob->axi_offset;
- for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
- lower_32_bits(axi_addr) | OARR_VALID |
- (ob->set_oarr_size ? 1 : 0));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
- upper_32_bits(axi_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
- lower_32_bits(pci_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
- upper_32_bits(pci_addr));
-
- size -= ob->window_size;
- if (size == 0)
+ /* iterate through all OARR/OMAP mapping windows */
+ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+ const struct iproc_pcie_ob_map *ob_map =
+ &pcie->ob_map[window_idx];
+
+ /*
+ * If current outbound window is already in use, move on to the
+ * next one.
+ */
+ if (iproc_pcie_ob_is_valid(pcie, window_idx))
+ continue;
+
+ /*
+ * Iterate through all supported window sizes within the
+ * OARR/OMAP pair to find a match. Go through the window sizes
+ * in a descending order.
+ */
+ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+ size_idx--) {
+ resource_size_t window_size =
+ ob_map->window_sizes[size_idx] * SZ_1M;
+
+ if (size < window_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, window_size) ||
+ !IS_ALIGNED(pci_addr, window_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /*
+ * Match found! Program both OARR and OMAP and mark
+ * them as a valid entry.
+ */
+ ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+ axi_addr, pci_addr);
+ if (ret)
+ goto err_ob;
+
+ size -= window_size;
+ if (size == 0)
+ return 0;
+
+ /*
+ * If we are here, we are done with the current window,
+ * but not yet finished all mappings. Need to move on
+ * to the next window.
+ */
+ axi_addr += window_size;
+ pci_addr += window_size;
break;
-
- axi_addr += ob->window_size;
- pci_addr += ob->window_size;
+ }
}
- return 0;
+err_ob:
+ dev_err(dev, "unable to configure outbound mapping\n");
+ dev_err(dev,
+ "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+ &axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+ return ret;
}
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
@@ -434,13 +811,323 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return 0;
}
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+ int region_idx)
+{
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
+ return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+ enum iproc_pcie_ib_map_type type)
+{
+ return !!(ib_map->type == type);
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+ int size_idx, int nr_windows, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct device *dev = pcie->dev;
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u16 iarr_offset, imap_offset;
+ u32 val;
+ int window_idx;
+
+ iarr_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, region_idx));
+ imap_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+ if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+ iproc_pcie_reg_is_invalid(imap_offset))
+ return -EINVAL;
+
+ dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+ /*
+ * Program the IARR registers. The upper 32-bit IARR register is
+ * always right after the lower 32-bit IARR register.
+ */
+ writel(lower_32_bits(pci_addr) | BIT(size_idx),
+ pcie->base + iarr_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+ dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
+
+ /*
+ * Now program the IMAP registers. Each IARR region may have one or
+ * more IMAP windows.
+ */
+ size >>= ilog2(nr_windows);
+ for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+ val = readl(pcie->base + imap_offset);
+ val |= lower_32_bits(axi_addr) | IMAP_VALID;
+ writel(val, pcie->base + imap_offset);
+ writel(upper_32_bits(axi_addr),
+ pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+ dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
+
+ imap_offset += ib_map->imap_window_offset;
+ axi_addr += size;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+ struct of_pci_range *range,
+ enum iproc_pcie_ib_map_type type)
+{
+ struct device *dev = pcie->dev;
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ int ret;
+ unsigned int region_idx, size_idx;
+ u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
+ resource_size_t size = range->size;
+
+ /* iterate through all IARR mapping regions */
+ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+ const struct iproc_pcie_ib_map *ib_map =
+ &pcie->ib_map[region_idx];
+
+ /*
+ * If current inbound region is already in use or not a
+ * compatible type, move on to the next.
+ */
+ if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+ !iproc_pcie_ib_check_type(ib_map, type))
+ continue;
+
+ /* iterate through all supported region sizes to find a match */
+ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+ resource_size_t region_size =
+ ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
+ if (size != region_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, region_size) ||
+ !IS_ALIGNED(pci_addr, region_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /* Match found! Program IARR and all IMAP windows. */
+ ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+ ib_map->nr_windows, axi_addr,
+ pci_addr, size);
+ if (ret)
+ goto err_ib;
+ else
+ return 0;
+
+ }
+ }
+ ret = -EINVAL;
+
+err_ib:
+ dev_err(dev, "unable to configure inbound mapping\n");
+ dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+ &axi_addr, &pci_addr, &size);
+
+ return ret;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret;
+
+ /* Get the dma-ranges from DT */
+ ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ if (ret)
+ return ret;
+
+ for_each_of_pci_range(&parser, &range) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
+ struct device_node *msi_node,
+ u64 *msi_addr)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ struct resource res;
+
+ /*
+ * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
+ * supported external MSI controller that requires steering.
+ */
+ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+ dev_err(dev, "unable to find compatible MSI controller\n");
+ return -ENODEV;
+ }
+
+ /* derive GITS_TRANSLATER address from GICv3 */
+ ret = of_address_to_resource(msi_node, 0, &res);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain MSI controller resources\n");
+ return ret;
+ }
+
+ *msi_addr = res.start + GITS_TRANSLATER;
+ return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ int ret;
+ struct of_pci_range range;
+
+ memset(&range, 0, sizeof(range));
+ range.size = SZ_32K;
+ range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ u32 val;
+
+ /*
+ * Program bits [43:13] of address of GITS_TRANSLATER register into
+ * bits [30:0] of the MSI base address register. In fact, in all iProc
+ * based SoCs, all I/O register bases are well below the 32-bit
+ * boundary, so we can safely assume bits [43:32] are always zeros.
+ */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+ (u32)(msi_addr >> 13));
+
+ /* use a default 8K window size */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+ /* steering MSI to GICv3 ITS */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+ val |= GIC_V3_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+ /*
+ * Program bits [43:2] of address of GITS_TRANSLATER register into the
+ * iProc MSI address registers.
+ */
+ msi_addr >>= 2;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+ upper_32_bits(msi_addr));
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+ lower_32_bits(msi_addr));
+
+ /* enable MSI */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val |= MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+ struct device_node *msi_node)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ u64 msi_addr;
+
+ ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr);
+ if (ret < 0) {
+ dev_err(dev, "msi steering failed\n");
+ return ret;
+ }
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_V2:
+ ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+ if (ret)
+ return ret;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
struct device_node *msi_node;
+ int ret;
+
+ /*
+ * Either the "msi-parent" or the "msi-map" phandle needs to exist
+ * for us to obtain the MSI node.
+ */
msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node)
- return -ENODEV;
+ if (!msi_node) {
+ const __be32 *msi_map = NULL;
+ int len;
+ u32 phandle;
+
+ msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+ if (!msi_map)
+ return -ENODEV;
+
+ phandle = be32_to_cpup(msi_map + 1);
+ msi_node = of_find_node_by_phandle(phandle);
+ if (!msi_node)
+ return -ENODEV;
+ }
+
+ /*
+ * Certain revisions of the iProc PCIe controller require additional
+ * configurations to steer the MSI writes towards an external MSI
+ * controller.
+ */
+ if (pcie->need_msi_steer) {
+ ret = iproc_pcie_msi_steer(pcie, msi_node);
+ if (ret)
+ return ret;
+ }
/*
* If another MSI controller is being used, the call below should fail
@@ -454,6 +1141,65 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
iproc_msi_exit(pcie);
}
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int reg_idx;
+ const u16 *regs;
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
+ regs = iproc_pcie_reg_paxb_bcma;
+ break;
+ case IPROC_PCIE_PAXB:
+ regs = iproc_pcie_reg_paxb;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+ }
+ break;
+ case IPROC_PCIE_PAXB_V2:
+ regs = iproc_pcie_reg_paxb_v2;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_v2_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+ }
+ pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+ pcie->ib_map = paxb_v2_ib_map;
+ pcie->need_msi_steer = true;
+ break;
+ case IPROC_PCIE_PAXC:
+ regs = iproc_pcie_reg_paxc;
+ pcie->ep_is_internal = true;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ regs = iproc_pcie_reg_paxc_v2;
+ pcie->ep_is_internal = true;
+ pcie->need_msi_steer = true;
+ break;
+ default:
+ dev_err(dev, "incompatible iProc PCIe interface\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+ sizeof(*pcie->reg_offsets),
+ GFP_KERNEL);
+ if (!pcie->reg_offsets)
+ return -ENOMEM;
+
+ /* go through the register table and populate all valid registers */
+ pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+ IPROC_PCIE_REG_INVALID : regs[0];
+ for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+ pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+ return 0;
+}
+
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
@@ -462,6 +1208,13 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
struct pci_bus *bus;
dev = pcie->dev;
+
+ ret = iproc_pcie_rev_init(pcie);
+ if (ret) {
+ dev_err(dev, "unable to initialize controller parameters\n");
+ return ret;
+ }
+
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
@@ -478,19 +1231,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
goto err_exit_phy;
}
- switch (pcie->type) {
- case IPROC_PCIE_PAXB:
- pcie->reg_offsets = iproc_pcie_reg_paxb;
- break;
- case IPROC_PCIE_PAXC:
- pcie->reg_offsets = iproc_pcie_reg_paxc;
- break;
- default:
- dev_err(dev, "incompatible iProc PCIe interface\n");
- ret = -EINVAL;
- goto err_power_off_phy;
- }
-
iproc_pcie_reset(pcie);
if (pcie->need_ob_cfg) {
@@ -501,6 +1241,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
}
}
+ ret = iproc_pcie_map_dma_ranges(pcie);
+ if (ret && ret != -ENOENT)
+ goto err_power_off_phy;
+
#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
sysdata = &pcie->sysdata;
@@ -530,7 +1274,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
+ if (pcie->map_irq)
+ pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
pci_bus_add_devices(bus);
return 0;
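
The outbound-mapping rewrite above replaces the old fixed-size window loop with a greedy match: walk the free OARR/OMAP pairs and, for each pair, pick the largest supported window size that still fits the remaining region (alignment checks omitted here). A standalone illustration of that selection policy on plain numbers, not driver code:

#include <stdio.h>

/* sizes[] is ascending, in MB, like the window_sizes tables above */
static unsigned int pick_window_mb(const unsigned int *sizes, int nr,
				   unsigned int remaining_mb)
{
	int i;

	for (i = nr - 1; i >= 0; i--)
		if (remaining_mb >= sizes[i])
			return sizes[i];
	return 0;	/* nothing fits; the driver reports -EINVAL */
}

int main(void)
{
	/* PAXB v2 OARR2/OARR3 support 128/256/512/1024 MB windows */
	const unsigned int sizes[] = { 128, 256, 512, 1024 };
	unsigned int remaining = 384;

	while (remaining) {
		unsigned int win = pick_window_mb(sizes, 4, remaining);

		if (!win)
			break;
		remaining -= win;
		printf("program a %u MB window, %u MB left\n", win, remaining);
	}
	return 0;
}

A 384 MB region therefore ends up split into one 256 MB window and one 128 MB window, each on its own OARR/OMAP pair.
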
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c53c7b..04fed8e907f1 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -24,23 +24,34 @@
* endpoint devices.
*/
enum iproc_pcie_type {
- IPROC_PCIE_PAXB = 0,
+ IPROC_PCIE_PAXB_BCMA = 0,
+ IPROC_PCIE_PAXB,
+ IPROC_PCIE_PAXB_V2,
IPROC_PCIE_PAXC,
+ IPROC_PCIE_PAXC_V2,
};
/**
* iProc PCIe outbound mapping
- * @set_oarr_size: indicates the OARR size bit needs to be set
* @axi_offset: offset from the AXI address to the internal address used by
* the iProc PCIe core
- * @window_size: outbound window size
+ * @nr_windows: total number of supported outbound mapping windows
*/
struct iproc_pcie_ob {
- bool set_oarr_size;
resource_size_t axi_offset;
- resource_size_t window_size;
+ unsigned int nr_windows;
};
+/**
+ * iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+ unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
struct iproc_msi;
/**
@@ -55,14 +66,25 @@ struct iproc_msi;
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as an APB bus error
+ *
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
- * @ob: outbound mapping parameters
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to an external interrupt controller
* @msi: MSI data
*/
struct iproc_pcie {
struct device *dev;
enum iproc_pcie_type type;
- const u16 *reg_offsets;
+ u16 *reg_offsets;
void __iomem *base;
phys_addr_t base_addr;
#ifdef CONFIG_ARM
@@ -71,8 +93,17 @@ struct iproc_pcie {
struct pci_bus *root_bus;
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool ep_is_internal;
+ bool has_apb_err_disable;
+
bool need_ob_cfg;
struct iproc_pcie_ob ob;
+ const struct iproc_pcie_ob_map *ob_map;
+
+ struct iproc_pcie_ib ib;
+ const struct iproc_pcie_ib_map *ib_map;
+
+ bool need_msi_steer;
struct iproc_msi *msi;
};
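
To make the PAXC v2 MSI steering shifts in pcie-iproc.c above concrete, here is a worked example with a hypothetical GICv3 ITS at 0x63c20000 (GITS_TRANSLATER is the 0x10040 offset within the ITS frame):

#include <stdint.h>
#include <stdio.h>

#define GITS_TRANSLATER	0x10040

int main(void)
{
	uint64_t its_base = 0x63c20000;	/* hypothetical, for illustration only */
	uint64_t msi_addr = its_base + GITS_TRANSLATER;

	/* bits [43:13] go into bits [30:0] of IPROC_PCIE_MSI_BASE_ADDR */
	printf("MSI_BASE_ADDR = 0x%llx\n", (unsigned long long)(msi_addr >> 13));

	/* bits [43:2] go into the IPROC_PCIE_MSI_ADDR_LO/HI pair */
	printf("MSI_ADDR_LO   = 0x%llx\n",
	       (unsigned long long)((msi_addr >> 2) & 0xffffffff));
	printf("MSI_ADDR_HI   = 0x%llx\n", (unsigned long long)(msi_addr >> 34));
	return 0;
}

With this base the programmed values come out to 0x31e18, 0x18f0c010 and 0x0 respectively.
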
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 35936409b2d4..734ba0d4a5c8 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -36,11 +36,17 @@
#include "pcie-designware.h"
+#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -72,9 +78,18 @@ struct qcom_pcie_resources_v1 {
struct regulator *vdda;
};
+struct qcom_pcie_resources_v2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_v0 v0;
struct qcom_pcie_resources_v1 v1;
+ struct qcom_pcie_resources_v2 v2;
};
struct qcom_pcie;
@@ -82,7 +97,9 @@ struct qcom_pcie;
struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
};
struct qcom_pcie {
@@ -116,17 +133,35 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
- if (dw_pcie_link_up(&pcie->pp))
- return 0;
-
/* enable link training */
val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+
+ if (dw_pcie_link_up(&pcie->pp))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
return dw_pcie_wait_for_link(&pcie->pp);
}
@@ -421,6 +456,113 @@ err_res:
return ret;
}
+static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(res->pipe_clk))
+ return PTR_ERR(res->pipe_clk);
+
+ return 0;
+}
+
+static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -429,6 +571,17 @@ static int qcom_pcie_link_up(struct pcie_port *pp)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+ clk_disable_unprepare(res->pipe_clk);
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
static void qcom_pcie_host_init(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -444,6 +597,9 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_deinit;
+ if (pcie->ops->post_init)
+ pcie->ops->post_init(pcie);
+
dw_pcie_setup_rc(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -487,12 +643,22 @@ static const struct qcom_pcie_ops ops_v0 = {
.get_resources = qcom_pcie_get_resources_v0,
.init = qcom_pcie_init_v0,
.deinit = qcom_pcie_deinit_v0,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};
static const struct qcom_pcie_ops ops_v1 = {
.get_resources = qcom_pcie_get_resources_v1,
.init = qcom_pcie_init_v1,
.deinit = qcom_pcie_deinit_v1,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+};
+
+static const struct qcom_pcie_ops ops_v2 = {
+ .get_resources = qcom_pcie_get_resources_v2,
+ .init = qcom_pcie_init_v2,
+ .post_init = qcom_pcie_post_init_v2,
+ .deinit = qcom_pcie_deinit_v2,
+ .ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
static int qcom_pcie_probe(struct platform_device *pdev)
@@ -572,6 +738,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
{ }
};
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 62700d1896f4..aca85be101f8 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1071,13 +1071,14 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
- { .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7790",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
.data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
{},
};
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e04f69beb42d..f2dca7bb0b39 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -53,6 +53,7 @@
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
@@ -135,13 +136,14 @@
#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
-#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
-#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
-#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
-#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
@@ -203,8 +205,14 @@ struct rockchip_pcie {
struct gpio_desc *ep_gpio;
u32 lanes;
u8 root_bus_nr;
+ int link_gen;
struct device *dev;
struct irq_domain *irq_domain;
+ u32 io_size;
+ int offset;
+ phys_addr_t io_bus_addr;
+ u32 mem_size;
+ phys_addr_t mem_bus_addr;
};
static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
@@ -223,7 +231,7 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -232,7 +240,7 @@ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -398,6 +406,40 @@ static struct pci_ops rockchip_pcie_ops = {
.write = rockchip_pcie_wr_conf,
};
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ u32 status, curr, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+ * Set the RC's captured slot power limit and scale if
+ * vpcie3v3 is available. The default values are both zero,
+ * which means the software should set these two according
+ * to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr > 0) {
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ }
+}
+
/**
* rockchip_pcie_init_port - Initialize hardware
* @rockchip: PCIe port information
@@ -429,26 +471,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- udelay(10);
-
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -479,14 +501,40 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ if (rockchip->link_gen == 2)
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+ else
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+ PCIE_CLIENT_CONFIG);
+
rockchip_pcie_write(rockchip,
PCIE_CLIENT_CONF_ENABLE |
PCIE_CLIENT_LINK_TRAIN_ENABLE |
PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC |
- PCIE_CLIENT_GEN_SEL_2,
- PCIE_CLIENT_CONFIG);
+ PCIE_CLIENT_MODE_RC,
+ PCIE_CLIENT_CONFIG);
err = phy_power_on(rockchip->phy);
if (err) {
@@ -522,21 +570,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- /*
- * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
- * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal isn't
- * reliable and enabling ASPM doesn't work. This is a controller
- * bug we need to work around.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
-
/* Fix the transmitted FTS count desired to exit from L0s. */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
(PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_CCC;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
@@ -563,35 +609,37 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
msleep(20);
}
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ if (rockchip->link_gen == 2) {
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 training has finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ for (;;) {
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+ PCIE_CORE_PL_CONF_SPEED_5G) {
+ dev_dbg(dev, "PCIe link training gen2 pass!\n");
+ break;
+ }
- timeout = jiffies + msecs_to_jiffies(500);
- for (;;) {
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
- PCIE_CORE_PL_CONF_SPEED_5G) {
- dev_dbg(dev, "PCIe link training gen2 pass!\n");
- break;
- }
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ break;
+ }
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- break;
+ msleep(20);
}
-
- msleep(20);
}
/* Check the final link width from negotiated lane counter from MGMT */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_MASK);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
dev_dbg(dev, "current link width is x%d\n", status);
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
@@ -599,6 +647,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
rockchip_pcie_write(rockchip,
@@ -794,6 +848,10 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
rockchip->lanes = 1;
}
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
+
rockchip->core_rst = devm_reset_control_get(dev, "core");
if (IS_ERR(rockchip->core_rst)) {
if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
@@ -1087,6 +1145,50 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
return 0;
}
+static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
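
rockchip_cfg_atu() above programs one outbound ATU region per 1 MiB of window: the region count is the window size shifted right by 20, each region passes 2^20 address bits (hence the "20 - 1" argument), and the I/O regions are numbered after the memory regions via the offset. A small C sketch of that bookkeeping with made-up window sizes; rockchip_pcie_prog_ob_atu() is modelled as a plain printout:

#include <stdio.h>
#include <stdint.h>

#define REGION_SHIFT 20		/* each outbound region maps 1 MiB */

/* Stand-in for rockchip_pcie_prog_ob_atu(): just show what would be programmed. */
static void prog_ob_atu(int reg_no, const char *type, uint64_t bus_addr)
{
	printf("region %2d: %-3s pass_bits=%d bus_addr=0x%llx\n",
	       reg_no, type, REGION_SHIFT - 1, (unsigned long long)bus_addr);
}

int main(void)
{
	/* Hypothetical window layout, analogous to mem_size/io_size from DT. */
	uint64_t mem_bus_addr = 0xf8000000, mem_size = 4 << REGION_SHIFT; /* 4 MiB */
	uint64_t io_bus_addr  = 0xfbe00000, io_size  = 1 << REGION_SHIFT; /* 1 MiB */
	int reg_no, offset;

	for (reg_no = 0; reg_no < (int)(mem_size >> REGION_SHIFT); reg_no++)
		prog_ob_atu(reg_no + 1, "MEM",
			    mem_bus_addr + ((uint64_t)reg_no << REGION_SHIFT));

	/* I/O regions are numbered after the last memory region. */
	offset = mem_size >> REGION_SHIFT;
	for (reg_no = 0; reg_no < (int)(io_size >> REGION_SHIFT); reg_no++)
		prog_ob_atu(reg_no + 1 + offset, "IO",
			    io_bus_addr + ((uint64_t)reg_no << REGION_SHIFT));

	return 0;
}
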
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct rockchip_pcie *rockchip;
@@ -1096,13 +1198,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
resource_size_t io_base;
struct resource *mem;
struct resource *io;
- phys_addr_t io_bus_addr = 0;
- u32 io_size;
- phys_addr_t mem_bus_addr = 0;
- u32 mem_size = 0;
- int reg_no;
int err;
- int offset;
LIST_HEAD(res);
@@ -1169,14 +1265,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
goto err_vpcie;
/* Get the I/O and memory ranges from DT */
- io_size = 0;
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "I/O";
- io_size = resource_size(io);
- io_bus_addr = io->start - win->offset;
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
err = pci_remap_iospace(io, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -1187,8 +1282,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
case IORESOURCE_MEM:
mem = win->res;
mem->name = "MEM";
- mem_size = resource_size(mem);
- mem_bus_addr = mem->start - win->offset;
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
break;
case IORESOURCE_BUS:
rockchip->root_bus_nr = win->res->start;
@@ -1198,45 +1293,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
}
}
- if (mem_size) {
- for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
+ err = rockchip_cfg_atu(rockchip);
+ if (err)
goto err_vpcie;
- }
-
- offset = mem_size >> 20;
-
- if (io_size) {
- for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
if (!bus) {
err = -ENOMEM;
@@ -1249,9 +1308,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return err;
err_vpcie:
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 3cf197ba7f37..dafe8b88d97d 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -296,8 +296,4 @@ static struct platform_driver spear13xx_pcie_driver = {
},
};
-static int __init spear13xx_pcie_init(void)
-{
- return platform_driver_register(&spear13xx_pcie_driver);
-}
-device_initcall(spear13xx_pcie_init);
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 37e29b580be3..18ef1a93c10a 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -39,7 +40,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
/**
* struct vmd_irq - private data to map driver IRQ to the VMD shared vector
* @node: list item for parent traversal.
- * @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
* @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
@@ -49,7 +49,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
*/
struct vmd_irq {
struct list_head node;
- struct rcu_head rcu;
struct vmd_irq_list *irq;
bool enabled;
unsigned int virq;
@@ -58,11 +57,13 @@ struct vmd_irq {
/**
* struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
* @irq_list: the list of irq's the VMD one demuxes to.
+ * @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
*/
struct vmd_irq_list {
struct list_head irq_list;
+ struct srcu_struct srcu;
unsigned int count;
};
@@ -224,14 +225,14 @@ static void vmd_msi_free(struct irq_domain *domain,
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
unsigned long flags;
- synchronize_rcu();
+ synchronize_srcu(&vmdirq->irq->srcu);
/* XXX: Potential optimization to rebalance */
raw_spin_lock_irqsave(&list_lock, flags);
vmdirq->irq->count--;
raw_spin_unlock_irqrestore(&list_lock, flags);
- kfree_rcu(vmdirq, rcu);
+ kfree(vmdirq);
}
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
@@ -646,11 +647,12 @@ static irqreturn_t vmd_irq(int irq, void *data)
{
struct vmd_irq_list *irqs = data;
struct vmd_irq *vmdirq;
+ int idx;
- rcu_read_lock();
+ idx = srcu_read_lock(&irqs->srcu);
list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
generic_handle_irq(vmdirq->virq);
- rcu_read_unlock();
+ srcu_read_unlock(&irqs->srcu, idx);
return IRQ_HANDLED;
}
@@ -696,6 +698,10 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
for (i = 0; i < vmd->msix_count; i++) {
+ err = init_srcu_struct(&vmd->irqs[i].srcu);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
vmd_irq, 0, "vmd", &vmd->irqs[i]);
@@ -714,12 +720,20 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
}
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+ int i;
+
+ for (i = 0; i < vmd->msix_count; i++)
+ cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
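
The vmd.c conversion above replaces global RCU with one srcu_struct per IRQ vector, so the synchronize_srcu() in vmd_msi_free() only waits for readers of that vector's list instead of every RCU reader in the system. A minimal kernel-module sketch of the same pattern in isolation (not the VMD driver), using only the SRCU calls that appear in the hunk:

#include <linux/module.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rculist.h>

struct item {
	struct list_head node;
	int val;
};

static struct srcu_struct demo_srcu;
static LIST_HEAD(demo_list);
static struct item *only;

/* Reader side: per-srcu_struct critical section, as in vmd_irq(). */
static void demo_read(void)
{
	struct item *it;
	int idx;

	idx = srcu_read_lock(&demo_srcu);
	list_for_each_entry_rcu(it, &demo_list, node)
		pr_info("item %d\n", it->val);
	srcu_read_unlock(&demo_srcu, idx);
}

/* Writer side: wait only for readers of *this* srcu_struct, then free,
 * mirroring the synchronize_srcu()/kfree() pair in vmd_msi_free(). */
static void demo_del(struct item *it)
{
	list_del_rcu(&it->node);
	synchronize_srcu(&demo_srcu);
	kfree(it);
}

static int __init demo_init(void)
{
	int err = init_srcu_struct(&demo_srcu);

	if (err)
		return err;

	only = kzalloc(sizeof(*only), GFP_KERNEL);
	if (!only) {
		cleanup_srcu_struct(&demo_srcu);
		return -ENOMEM;
	}
	only->val = 42;
	list_add_tail_rcu(&only->node, &demo_list);
	demo_read();
	return 0;
}

static void __exit demo_exit(void)
{
	demo_del(only);
	cleanup_srcu_struct(&demo_srcu);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
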
+
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
vmd_detach_resources(vmd);
- pci_set_drvdata(dev, NULL);
+ vmd_cleanup_srcu(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
@@ -727,7 +741,7 @@ static void vmd_remove(struct pci_dev *dev)
irq_domain_remove(vmd->irq_domain);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585fae31..5ed2dcaa8e27 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -222,35 +222,6 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev)
acpiphp_let_context_go(context);
}
-/* Check whether the PCI device is managed by native PCIe hotplug driver */
-static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
-{
- u32 reg32;
- acpi_handle tmp;
- struct acpi_pci_root *root;
-
- /* Check whether the PCIe port supports native PCIe hotplug */
- if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
- return false;
- if (!(reg32 & PCI_EXP_SLTCAP_HPC))
- return false;
-
- /*
- * Check whether native PCIe hotplug has been enabled for
- * this PCIe hierarchy.
- */
- tmp = acpi_find_root_bridge_handle(pdev);
- if (!tmp)
- return false;
- root = acpi_pci_find_root(tmp);
- if (!root)
- return false;
- if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return false;
-
- return true;
-}
-
/**
* acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
* @handle: ACPI handle of the object to add a context to.
@@ -334,7 +305,7 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
* expose slots to user space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+ && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
unsigned long long sun;
int retval;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index f6221d739f59..68d105aaf4e2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -35,7 +35,7 @@
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "acpiphp.h"
#include "../pci.h"
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 74f3a0695b43..33d300d12411 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -40,7 +40,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"
@@ -867,7 +867,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_disable_device;
}
/* TODO: This code can be made to support non-Compaq or Intel
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index c25fc9061059..daae8071a156 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -34,7 +34,7 @@
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index fea0b8b33589..7b0e97be9063 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -23,6 +23,9 @@
*
* Send feedback to <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Greg Kroah-Hartman <greg@kroah.com>
+ * Scott Murray <scottm@somanetworks.com>
*/
#include <linux/module.h> /* try_module_get & module_put */
@@ -39,7 +42,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "../pci.h"
#include "cpci_hotplug.h"
@@ -50,15 +53,9 @@
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
/* local variables */
static bool debug;
-#define DRIVER_VERSION "0.5"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
-#define DRIVER_DESC "PCI Hot Plug PCI Core"
-
-
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
@@ -534,7 +531,6 @@ static int __init pci_hotplug_init(void)
return result;
}
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
device_initcall(pci_hotplug_init);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d32fa33dcef..35d84845d5af 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -25,6 +25,10 @@
*
* Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Dan Zink <dan.zink@compaq.com>
+ * Greg Kroah-Hartman <greg@kroah.com>
+ * Dely Sy <dely.l.sy@intel.com>"
*/
#include <linux/moduleparam.h>
@@ -42,10 +46,6 @@ bool pciehp_poll_mode;
int pciehp_poll_time;
static bool pciehp_force;
-#define DRIVER_VERSION "0.4"
-#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
-#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
-
/*
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
@@ -333,7 +333,6 @@ static int __init pcied_init(void)
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval)
dbg("Failure to register service\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index efe69e879455..10c9c0ba8ff2 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
@@ -98,6 +99,7 @@ static int board_added(struct slot *p_slot)
pciehp_green_led_blink(p_slot);
/* Check link training status */
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
@@ -118,12 +120,14 @@ static int board_added(struct slot *p_slot)
if (retval != -EEXIST)
goto err_exit;
}
+ pm_runtime_put(&ctrl->pcie->port->dev);
pciehp_green_led_on(p_slot);
pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
+ pm_runtime_put(&ctrl->pcie->port->dev);
set_slot_off(ctrl, p_slot);
return retval;
}
@@ -137,7 +141,9 @@ static int remove_board(struct slot *p_slot)
int retval;
struct controller *ctrl = p_slot->ctrl;
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_unconfigure_device(p_slot);
+ pm_runtime_put(&ctrl->pcie->port->dev);
if (retval)
return retval;
@@ -410,7 +416,7 @@ int pciehp_enable_slot(struct slot *p_slot)
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
- return -EINVAL;
+ return 0;
}
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b57fc6d6e28a..026830a138ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -620,8 +620,18 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
}
- /* Check Presence Detect Changed */
- if (events & PCI_EXP_SLTSTA_PDC) {
+ /*
+ * Check Link Status Changed at higher precedence than Presence
+ * Detect Changed. The PDS value may be set to "card present" from
+ * out-of-band detection, which may be in conflict with a Link Down
+ * and cause the wrong event to queue.
+ */
+ if (events & PCI_EXP_SLTSTA_DLLSC) {
+ ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+ link ? "Up" : "Down");
+ pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
+ INT_LINK_DOWN);
+ } else if (events & PCI_EXP_SLTSTA_PDC) {
present = !!(status & PCI_EXP_SLTSTA_PDS);
ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
present ? "" : "not ");
@@ -636,13 +646,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (events & PCI_EXP_SLTSTA_DLLSC) {
- ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
- link ? "Up" : "Down");
- pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
- INT_LINK_DOWN);
- }
-
return IRQ_HANDLED;
}
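
The reordered ISR above gives Data Link Layer State Changed priority over Presence Detect Changed because out-of-band presence detection can report "card present" while the link is down, which would otherwise queue the wrong event. A compact user-space sketch of that precedence using the architected Slot Status bit values; the event enum is a placeholder, not pciehp's internal type:

#include <stdio.h>
#include <stdint.h>

/* Slot Status bits as defined by the PCIe spec (PCI_EXP_SLTSTA_*). */
#define SLTSTA_PDC   0x0008	/* Presence Detect Changed */
#define SLTSTA_PDS   0x0040	/* Presence Detect State */
#define SLTSTA_DLLSC 0x0100	/* Data Link Layer State Changed */

enum event { EV_NONE, EV_LINK_UP, EV_LINK_DOWN, EV_CARD_PRESENT, EV_CARD_GONE };

static enum event classify(uint16_t events, uint16_t status, int link_up)
{
	/* Link change outranks presence change: out-of-band presence may say
	 * "present" while the link is down, which would queue the wrong event. */
	if (events & SLTSTA_DLLSC)
		return link_up ? EV_LINK_UP : EV_LINK_DOWN;
	if (events & SLTSTA_PDC)
		return (status & SLTSTA_PDS) ? EV_CARD_PRESENT : EV_CARD_GONE;
	return EV_NONE;
}

int main(void)
{
	/* Both bits pending, link reported down: the link event wins. */
	enum event ev = classify(SLTSTA_DLLSC | SLTSTA_PDC, SLTSTA_PDS, 0);

	printf("queued event: %d (expect %d, EV_LINK_DOWN)\n", ev, EV_LINK_DOWN);
	return 0;
}
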
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39779ec..c614ff7c3bc3 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
- if (vio_find_node(dn))
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (vio_dev) {
+ put_device(&vio_dev->dev);
return -EINVAL;
+ }
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
@@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
return -EINVAL;
vio_unregister_device(vio_dev);
+
+ put_device(&vio_dev->dev);
+
return 0;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c8517f..47227820406d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
- pci_iov_set_numvfs(dev, nr_virtfn);
- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_cfg_access_lock(dev);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- msleep(100);
- pci_cfg_access_unlock(dev);
-
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto err_pcibios;
}
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
for (i = 0; i < initial; i++) {
rc = pci_iov_add_virtfn(dev, i, 0);
if (rc)
@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
*/
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
- return 0;
+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+ struct resource *res = dev->resource + resno;
+ int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct pci_bus_region region;
+ u16 cmd;
+ u32 new;
+ int reg;
+
+ /*
+ * The generic pci_restore_bars() path calls this for all devices,
+ * including VFs and non-SR-IOV devices. If this is not a PF, we
+ * have nothing to do.
+ */
+ if (!iov)
+ return;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+ vf_bar, res);
+ return;
+ }
+
+ /*
+ * Ignore unimplemented BARs, unused resource slots for 64-bit
+ * BARs, and non-movable resources, e.g., those described via
+ * Enhanced Allocation.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
- BUG_ON(!dev->is_physfn);
+ pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
- return dev->sriov->pos + PCI_SRIOV_BAR +
- 4 * (resno - PCI_IOV_RESOURCES);
+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+ pci_write_config_dword(dev, reg, new);
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ }
}
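
pci_iov_update_resource() above writes the low dword of the VF BAR and, for a 64-bit BAR, follows up with the high dword computed as region.start >> 16 >> 16, a shift-by-32 spelled so it stays well defined even where the operand type is only 32 bits wide. A user-space sketch of that split, with pci_write_config_dword() mocked as a printout and a hypothetical SR-IOV BAR offset:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for pci_write_config_dword(): just show the register write. */
static void write_config_dword(int reg, uint32_t val)
{
	printf("config write: reg 0x%03x <- 0x%08x\n", reg, val);
}

/* Program one (possibly 64-bit) BAR at config offset 'reg'. */
static void update_bar(int reg, uint64_t bus_addr, int is_64bit)
{
	/* Low dword first ... */
	write_config_dword(reg, (uint32_t)bus_addr);

	/* ... then the high dword.  ">> 16 >> 16" is the idiom used above for
	 * a 32-bit shift that remains defined even if the value were only
	 * 32 bits wide on some configurations. */
	if (is_64bit)
		write_config_dword(reg + 4, (uint32_t)(bus_addr >> 16 >> 16));
}

int main(void)
{
	update_bar(0x164 /* hypothetical SR-IOV BAR offset */,
		   0x20faff00000ULL, 1);
	return 0;
}
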
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index dd27f73a45fc..50c5003295ca 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1302,7 +1302,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
} else if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
- if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+ if (WARN_ON_ONCE(!entry || !entry->affinity ||
+ nr >= entry->nvec_used))
return NULL;
return &entry->affinity[nr];
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47c9e80..001860361434 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -29,6 +29,82 @@ const u8 pci_acpi_dsm_uuid[] = {
0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
};
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
+ void **retval)
+{
+ u16 *segment = context;
+ unsigned long long uid;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status) || uid != *segment)
+ return AE_CTRL_DEPTH;
+
+ *(acpi_handle *)retval = handle;
+ return AE_CTRL_TERMINATE;
+}
+
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+ acpi_handle handle;
+ int ret;
+
+ status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "can't find _HID %s device to locate resources\n",
+ hid);
+ return -ENODEV;
+ }
+
+ ret = acpi_bus_get_device(handle, &adev);
+ if (ret)
+ return ret;
+
+ ret = acpi_get_rc_addr(adev, res);
+ if (ret) {
+ dev_err(dev, "can't get resource from %s\n",
+ dev_name(&adev->dev));
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
acpi_status status = AE_NOT_EXIST;
@@ -294,6 +370,30 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
+ * pciehp_is_native - Check whether a hotplug port is handled by the OS
+ * @pdev: Hotplug port to check
+ *
+ * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
+ * and return the value of the "PCI Express Native Hot Plug control" bit.
+ * On failure to obtain the _OSC Control Field return %false.
+ */
+bool pciehp_is_native(struct pci_dev *pdev)
+{
+ struct acpi_pci_root *root;
+ acpi_handle handle;
+
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (!handle)
+ return false;
+
+ root = acpi_pci_find_root(handle);
+ if (!root)
+ return false;
+
+ return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+}
+
+/**
* pci_acpi_wake_bus - Root bus wakeup notification fork function.
* @work: Work item to handle.
*/
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c7f3408e3148..1c4af7227bca 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -54,7 +54,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
return false;
}
-static struct pci_platform_pm_ops mid_pci_platform_pm = {
+static const struct pci_platform_pm_ops mid_pci_platform_pm = {
.is_manageable = mid_pci_power_manageable,
.set_state = mid_pci_set_power_state,
.get_state = mid_pci_get_power_state,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c795284..066628776e1b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -50,6 +50,7 @@ pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
+pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");
@@ -568,6 +569,7 @@ static struct attribute *pci_dev_attrs[] = {
&dev_attr_device.attr,
&dev_attr_subsystem_vendor.attr,
&dev_attr_subsystem_device.attr,
+ &dev_attr_revision.attr,
&dev_attr_class.attr,
&dev_attr_irq.attr,
&dev_attr_local_cpus.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ba34907538f6..a881c0d3d2e8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
- if (dev->is_virtfn)
- return;
-
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -2106,6 +2102,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
if (!dev->pme_support)
return false;
+ /* PME-capable in principle, but not from the intended sleep state */
+ if (!pci_pme_capable(dev, pci_target_state(dev)))
+ return false;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
@@ -2226,7 +2226,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for recent enough PCIe ports.
*/
-static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
unsigned int year;
@@ -2239,6 +2239,14 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
case PCI_EXP_TYPE_DOWNSTREAM:
if (pci_bridge_d3_disable)
return false;
+
+ /*
+ * Hotplug ports handled by firmware in System Management Mode
+ * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ */
+ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ return false;
+
if (pci_bridge_d3_force)
return true;
@@ -2259,32 +2267,36 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
bool *d3cold_ok = data;
- bool no_d3cold;
- /*
- * The device needs to be allowed to go D3cold and if it is wake
- * capable to do so from D3cold.
- */
- no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
- (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
- !pci_power_manageable(dev);
+ if (/* The device needs to be allowed to go D3cold ... */
+ dev->no_d3cold || !dev->d3cold_allowed ||
- *d3cold_ok = !no_d3cold;
+ /* ... and if it is wakeup capable to do so from D3cold. */
+ (device_may_wakeup(&dev->dev) &&
+ !pci_pme_capable(dev, PCI_D3cold)) ||
- return no_d3cold;
+ /* If it is a bridge it must be allowed to go to D3. */
+ !pci_power_manageable(dev) ||
+
+ /* Hotplug interrupts cannot be delivered if the link is down. */
+ dev->is_hotplug_bridge)
+
+ *d3cold_ok = false;
+
+ return !*d3cold_ok;
}
/*
* pci_bridge_d3_update - Update bridge D3 capabilities
* @dev: PCI device which is changed
- * @remove: Is the device being removed
*
* Update upstream bridge PM capabilities accordingly depending on if the
* device PM configuration was changed or the device is being removed. The
* change is also propagated upstream.
*/
-static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+void pci_bridge_d3_update(struct pci_dev *dev)
{
+ bool remove = !device_is_registered(&dev->dev);
struct pci_dev *bridge;
bool d3cold_ok = true;
@@ -2292,55 +2304,39 @@ static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
if (!bridge || !pci_bridge_d3_possible(bridge))
return;
- pci_dev_get(bridge);
/*
- * If the device is removed we do not care about its D3cold
- * capabilities.
+ * If D3 is currently allowed for the bridge, removing one of its
+ * children won't change that.
+ */
+ if (remove && bridge->bridge_d3)
+ return;
+
+ /*
+ * If D3 is currently allowed for the bridge and a child is added or
+ * changed, disallowance of D3 can only be caused by that child, so
+ * we only need to check that single device, not any of its siblings.
+ *
+ * If D3 is currently not allowed for the bridge, checking the device
+ * first may allow us to skip checking its siblings.
*/
if (!remove)
pci_dev_check_d3cold(dev, &d3cold_ok);
- if (d3cold_ok) {
- /*
- * We need to go through all children to find out if all of
- * them can still go to D3cold.
- */
+ /*
+ * If D3 is currently not allowed for the bridge, this may be caused
+ * either by the device being changed/removed or any of its siblings,
+ * so we need to go through all children to find out if one of them
+ * continues to block D3.
+ */
+ if (d3cold_ok && !bridge->bridge_d3)
pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
&d3cold_ok);
- }
if (bridge->bridge_d3 != d3cold_ok) {
bridge->bridge_d3 = d3cold_ok;
/* Propagate change to upstream bridges */
- pci_bridge_d3_update(bridge, false);
+ pci_bridge_d3_update(bridge);
}
-
- pci_dev_put(bridge);
-}
-
-/**
- * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
- * @dev: PCI device that was changed
- *
- * If a device is added or its PM configuration, such as is it allowed to
- * enter D3cold, is changed this function updates upstream bridge PM
- * capabilities accordingly.
- */
-void pci_bridge_d3_device_changed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, false);
-}
-
-/**
- * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
- * @dev: PCI device being removed
- *
- * Function updates upstream bridge PM capabilities based on other devices
- * still left on the bus.
- */
-void pci_bridge_d3_device_removed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, true);
}
/**
@@ -2355,7 +2351,7 @@ void pci_d3cold_enable(struct pci_dev *dev)
{
if (dev->no_d3cold) {
dev->no_d3cold = false;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);
@@ -2372,7 +2368,7 @@ void pci_d3cold_disable(struct pci_dev *dev)
{
if (!dev->no_d3cold) {
dev->no_d3cold = true;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
@@ -4831,36 +4827,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
}
EXPORT_SYMBOL(pci_select_bars);
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
- int reg;
-
- if (resno < PCI_ROM_RESOURCE) {
- *type = pci_bar_unknown;
- return PCI_BASE_ADDRESS_0 + 4 * resno;
- } else if (resno == PCI_ROM_RESOURCE) {
- *type = pci_bar_mem32;
- return dev->rom_base_reg;
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- /* device specific resource */
- *type = pci_bar_unknown;
- reg = pci_iov_resource_bar(dev, resno);
- if (reg)
- return reg;
- }
-
- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
- return 0;
-}
-
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 451856210e18..cb17db242f30 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,9 +1,6 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#define PCI_CFG_SPACE_SIZE 256
-#define PCI_CFG_SPACE_EXP_SIZE 4096
-
#define PCI_FIND_CAP_TTL 48
extern const unsigned char pcie_link_speed[];
@@ -85,8 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
-void pci_bridge_d3_device_changed(struct pci_dev *dev);
-void pci_bridge_d3_device_removed(struct pci_dev *dev);
+bool pci_bridge_d3_possible(struct pci_dev *dev);
+void pci_bridge_d3_update(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -245,7 +242,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -289,7 +285,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +299,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
- return 0;
-}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
@@ -356,4 +348,9 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
}
#endif
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res);
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 139150b2bdfd..dea186a9d6b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -30,13 +30,6 @@
#include "aerdrv.h"
#include "../../pci.h"
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-
static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
@@ -297,12 +290,12 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->device;
+ struct device *device = &dev->port->dev;
/* Alloc rpc data structure */
rpc = aer_alloc_rpc(dev);
if (!rpc) {
- dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
+ dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
aer_remove(dev);
return -ENOMEM;
}
@@ -310,7 +303,8 @@ static int aer_probe(struct pcie_device *dev)
/* Request IRQ ISR */
status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
if (status) {
- dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
+ dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
+ dev->irq);
aer_remove(dev);
return status;
}
@@ -318,8 +312,8 @@ static int aer_probe(struct pcie_device *dev)
rpc->isr = 1;
aer_enable_rootport(rpc);
-
- return status;
+ dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+ return 0;
}
/**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0ec649d961d7..17ac1dce3286 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -351,12 +351,26 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
return;
}
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_get_aspm_reg(child, &dwreg);
+
+ /*
+ * If ASPM not supported, don't mess with the clocks and link,
+ * bail out now.
+ */
+ if (!(upreg.support & dwreg.support))
+ return;
+
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* Get upstream/downstream components' register state */
+ /*
+ * Re-read upstream/downstream components' register state
+ * after clock configuration
+ */
pcie_get_aspm_reg(parent, &upreg);
- child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
pcie_get_aspm_reg(child, &dwreg);
/*
@@ -886,8 +900,8 @@ static ssize_t clk_ctl_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
+static DEVICE_ATTR_RW(link_state);
+static DEVICE_ATTR_RW(clk_ctl);
static char power_group[] = "power";
void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 884bad5320f8..717529331dac 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -300,8 +300,6 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
*/
static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
{
- dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
-
device_set_run_wake(&dev->dev, true);
dev->pme_interrupt = true;
return 0;
@@ -319,23 +317,8 @@ static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_set_native(port, NULL);
- if (port->subordinate) {
+ if (port->subordinate)
pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
- } else {
- struct pci_bus *bus = port->bus;
- struct pci_dev *dev;
-
- /* Check if this is a root port event collector. */
- if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
- return;
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_pcie(dev)
- && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
- pcie_pme_set_native(dev, NULL);
- up_read(&pci_bus_sem);
- }
}
/**
@@ -364,12 +347,14 @@ static int pcie_pme_probe(struct pcie_device *srv)
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
- } else {
- pcie_pme_mark_devices(port);
- pcie_pme_interrupt_enable(port, true);
+ return ret;
}
- return ret;
+ dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq);
+
+ pcie_pme_mark_devices(port);
+ pcie_pme_interrupt_enable(port, true);
+ return 0;
}
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e9270b4026f3..9698289f105c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -499,7 +499,6 @@ static int pcie_port_probe_service(struct device *dev)
if (status)
return status;
- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
get_device(dev);
return 0;
}
@@ -524,8 +523,6 @@ static int pcie_port_remove_service(struct device *dev)
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
- dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
- driver->name);
driver->remove(pciedev);
put_device(dev);
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 79327cc14e7d..8aa3f14bc87d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -19,6 +19,7 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
+#include "../pci.h"
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -149,15 +150,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- /*
- * Prevent runtime PM if the port is advertising support for PCIe
- * hotplug. Otherwise the BIOS hotplug SMI code might not be able
- * to enumerate devices behind this port properly (the port is
- * powered down preventing all config space accesses to the
- * subordinate devices). We can't be sure for native PCIe hotplug
- * either so prevent that as well.
- */
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
* config space accesses from userspace (lspci) will not
@@ -175,7 +168,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
static void pcie_portdrv_remove(struct pci_dev *dev)
{
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 104c46d53121..e164b5c9f0f0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
}
} else {
- res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ if (l & PCI_ROM_ADDRESS_ENABLE)
+ res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
mask64 = (u32)PCI_ROM_ADDRESS_MASK;
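
The probe.c change above stops OR-ing the raw ROM BAR bit into res->flags: the register bit (PCI_ROM_ADDRESS_ENABLE) and the resource flag (IORESOURCE_ROM_ENABLE) live in different namespaces, so the bit has to be tested and translated rather than copied through. A tiny sketch of the difference; the two constant values below are illustrative rather than the exact kernel definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only: a hardware register bit and a resource-flag bit
 * must not be assumed to share a position. */
#define ROM_ADDRESS_ENABLE	0x00000001u	/* bit in the ROM BAR register */
#define RES_ROM_ENABLE		0x00001000u	/* hypothetical resource flag */

static uint32_t flags_from_rom_bar(uint32_t l)
{
	uint32_t flags = 0;

	/* Test the hardware bit, then set the corresponding flag bit;
	 * OR-ing 'l & RES_ROM_ENABLE' would test the wrong bit entirely. */
	if (l & ROM_ADDRESS_ENABLE)
		flags |= RES_ROM_ENABLE;
	return flags;
}

int main(void)
{
	printf("enabled ROM  -> flags 0x%08x\n", flags_from_rom_bar(0xfffc0001u));
	printf("disabled ROM -> flags 0x%08x\n", flags_from_rom_bar(0xfffc0000u));
	return 0;
}
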
@@ -521,18 +522,19 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
-static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
struct pci_host_bridge *bridge;
- bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
if (!bridge)
return NULL;
INIT_LIST_HEAD(&bridge->windows);
- bridge->bus = b;
+
return bridge;
}
+EXPORT_SYMBOL(pci_alloc_host_bridge);
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
@@ -717,6 +719,123 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+int pci_register_host_bridge(struct pci_host_bridge *bridge)
+{
+ struct device *parent = bridge->dev.parent;
+ struct resource_entry *window, *n;
+ struct pci_bus *bus, *b;
+ resource_size_t offset;
+ LIST_HEAD(resources);
+ struct resource *res;
+ char addr[64], *fmt;
+ const char *name;
+ int err;
+
+ bus = pci_alloc_bus(NULL);
+ if (!bus)
+ return -ENOMEM;
+
+ bridge->bus = bus;
+
+ /* temporarily move resources off the list */
+ list_splice_init(&bridge->windows, &resources);
+ bus->sysdata = bridge->sysdata;
+ bus->msi = bridge->msi;
+ bus->ops = bridge->ops;
+ bus->number = bus->busn_res.start = bridge->busnr;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+#endif
+
+ b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
+ if (b) {
+ /* If we already got to this bus through a different bridge, ignore it */
+ dev_dbg(&b->dev, "bus already known\n");
+ err = -EEXIST;
+ goto free;
+ }
+
+ dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
+ bridge->busnr);
+
+ err = pcibios_root_bridge_prepare(bridge);
+ if (err)
+ goto free;
+
+ err = device_register(&bridge->dev);
+ if (err)
+ put_device(&bridge->dev);
+
+ bus->bridge = get_device(&bridge->dev);
+ device_enable_async_suspend(bus->bridge);
+ pci_set_bus_of_node(bus);
+ pci_set_bus_msi_domain(bus);
+
+ if (!parent)
+ set_dev_node(bus->bridge, pcibus_to_node(bus));
+
+ bus->dev.class = &pcibus_class;
+ bus->dev.parent = bus->bridge;
+
+ dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
+ name = dev_name(&bus->dev);
+
+ err = device_register(&bus->dev);
+ if (err)
+ goto unregister;
+
+ pcibios_add_bus(bus);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(bus);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", name);
+ else
+ pr_info("PCI host bridge to bus %s\n", name);
+
+ /* Add initial resources to the bus */
+ resource_list_for_each_entry_safe(window, n, &resources) {
+ list_move_tail(&window->node, &bridge->windows);
+ offset = window->offset;
+ res = window->res;
+
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(bus, bus->number, res->end);
+ else
+ pci_bus_add_resource(bus, res, 0);
+
+ if (offset) {
+ if (resource_type(res) == IORESOURCE_IO)
+ fmt = " (bus address [%#06llx-%#06llx])";
+ else
+ fmt = " (bus address [%#010llx-%#010llx])";
+
+ snprintf(addr, sizeof(addr), fmt,
+ (unsigned long long)(res->start - offset),
+ (unsigned long long)(res->end - offset));
+ } else
+ addr[0] = '\0';
+
+ dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&bus->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
+
+ return 0;
+
+unregister:
+ put_device(&bridge->dev);
+ device_unregister(&bridge->dev);
+
+free:
+ kfree(bus);
+ return err;
+}
+EXPORT_SYMBOL(pci_register_host_bridge);
+
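
pci_register_host_bridge() splits root-bus creation into "fill in a struct pci_host_bridge, then register it", which is what the rewritten pci_create_root_bus_msi() further down builds on. A condensed host-driver-style sketch of that calling sequence, restricted to the fields visible in this hunk; my_pcie_ops and my_port are placeholders, error handling is abbreviated, and the in-tree path additionally assigns a dev.release callback that this sketch leaves out:

#include <linux/pci.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Sketch of a host controller driver creating its root bus through the new
 * two-step API. */
static struct pci_bus *my_create_root_bus(struct device *parent,
					  struct pci_ops *my_pcie_ops,
					  void *my_port,
					  struct list_head *resources)
{
	struct pci_host_bridge *bridge;
	int err;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return NULL;

	bridge->dev.parent = parent;
	list_splice_init(resources, &bridge->windows);
	bridge->sysdata = my_port;		/* handed back as bus->sysdata */
	bridge->busnr = 0;
	bridge->ops = my_pcie_ops;

	err = pci_register_host_bridge(bridge);
	if (err < 0) {
		kfree(bridge);
		return NULL;
	}

	return bridge->bus;
}
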
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -1764,8 +1883,7 @@ static void pci_dma_configure(struct pci_dev *dev)
if (attr == DEV_DMA_NOT_SUPPORTED)
dev_warn(&dev->dev, "DMA not supported.\n");
else
- arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
- attr == DEV_DMA_COHERENT);
+ acpi_dma_configure(&dev->dev, attr);
}
pci_put_host_bridge_device(bridge);
@@ -2156,113 +2274,43 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
+static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
+ int bus, struct pci_ops *ops, void *sysdata,
+ struct list_head *resources, struct msi_controller *msi)
{
int error;
struct pci_host_bridge *bridge;
- struct pci_bus *b, *b2;
- struct resource_entry *window, *n;
- struct resource *res;
- resource_size_t offset;
- char bus_addr[64];
- char *fmt;
-
- b = pci_alloc_bus(NULL);
- if (!b)
- return NULL;
- b->sysdata = sysdata;
- b->ops = ops;
- b->number = b->busn_res.start = bus;
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
- b->domain_nr = pci_bus_find_domain_nr(b, parent);
-#endif
- b2 = pci_find_bus(pci_domain_nr(b), bus);
- if (b2) {
- /* If we already got to this bus through a different bridge, ignore it */
- dev_dbg(&b2->dev, "bus already known\n");
- goto err_out;
- }
-
- bridge = pci_alloc_host_bridge(b);
+ bridge = pci_alloc_host_bridge(0);
if (!bridge)
- goto err_out;
+ return NULL;
bridge->dev.parent = parent;
bridge->dev.release = pci_release_host_bridge_dev;
- dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
- error = pcibios_root_bridge_prepare(bridge);
- if (error) {
- kfree(bridge);
- goto err_out;
- }
-
- error = device_register(&bridge->dev);
- if (error) {
- put_device(&bridge->dev);
- goto err_out;
- }
- b->bridge = get_device(&bridge->dev);
- device_enable_async_suspend(b->bridge);
- pci_set_bus_of_node(b);
- pci_set_bus_msi_domain(b);
- if (!parent)
- set_dev_node(b->bridge, pcibus_to_node(b));
-
- b->dev.class = &pcibus_class;
- b->dev.parent = b->bridge;
- dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
- error = device_register(&b->dev);
- if (error)
- goto class_dev_reg_err;
+ list_splice_init(resources, &bridge->windows);
+ bridge->sysdata = sysdata;
+ bridge->busnr = bus;
+ bridge->ops = ops;
+ bridge->msi = msi;
- pcibios_add_bus(b);
-
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(b);
-
- if (parent)
- dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
- else
- printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
-
- /* Add initial resources to the bus */
- resource_list_for_each_entry_safe(window, n, resources) {
- list_move_tail(&window->node, &bridge->windows);
- res = window->res;
- offset = window->offset;
- if (res->flags & IORESOURCE_BUS)
- pci_bus_insert_busn_res(b, bus, res->end);
- else
- pci_bus_add_resource(b, res, 0);
- if (offset) {
- if (resource_type(res) == IORESOURCE_IO)
- fmt = " (bus address [%#06llx-%#06llx])";
- else
- fmt = " (bus address [%#010llx-%#010llx])";
- snprintf(bus_addr, sizeof(bus_addr), fmt,
- (unsigned long long) (res->start - offset),
- (unsigned long long) (res->end - offset));
- } else
- bus_addr[0] = '\0';
- dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
- }
+ error = pci_register_host_bridge(bridge);
+ if (error < 0)
+ goto err_out;
- down_write(&pci_bus_sem);
- list_add_tail(&b->node, &pci_root_buses);
- up_write(&pci_bus_sem);
+ return bridge->bus;
- return b;
-
-class_dev_reg_err:
- put_device(&bridge->dev);
- device_unregister(&bridge->dev);
err_out:
- kfree(b);
+ kfree(bridge);
return NULL;
}
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
+ NULL);
+}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2343,12 +2391,10 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
break;
}
- b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
if (!b)
return NULL;
- b->msi = msi;
-
if (!found) {
dev_info(&b->dev,
"No busn resource found for root bus, will use [bus %02x-ff]\n",
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 2408abe4ee8c..f82710a8694d 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -11,7 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/capability.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include "pci.h"
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c232729f5b1b..1800befa8b8b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2156,7 +2156,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev)
{
if (dev->vpd) {
dev->vpd->len = 0;
- dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+ dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
}
@@ -3044,7 +3044,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
- ktime_t calltime = ktime_set(0, 0);
+ ktime_t calltime = 0;
dev_dbg(&dev->dev, "calling %pF\n", fn);
if (initcall_debug) {
@@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+
/*
- * Some devices may pass our check in pci_intx_mask_supported if
+ * Some devices may pass our check in pci_intx_mask_supported() if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
* support this feature.
*/
@@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev)
{
dev->broken_intx_masking = 1;
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
* Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
*
* RTL8110SC - Fails under PCI device assignment using DisINTx masking.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
/*
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
+ quirk_broken_intx_masking);
+
+static u16 mellanox_broken_intx_devs[] = {
+ PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
+};
+
+#define CONNECTX_4_CURR_MAX_MINOR 99
+#define CONNECTX_4_INTX_SUPPORT_MINOR 14
+
+/*
+ * Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
+ * If so, don't mark it as broken.
+ * FW minor > 99 means older FW version format and no INTx masking support.
+ * FW minor < 14 means new FW version format and no INTx masking support.
+ */
+static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
+{
+ __be32 __iomem *fw_ver;
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_subminor;
+ u32 fw_maj_min;
+ u32 fw_sub_min;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
+ if (pdev->device == mellanox_broken_intx_devs[i]) {
+ pdev->broken_intx_masking = 1;
+ return;
+ }
+ }
+
+ /* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+ * support so shouldn't be checked further
+ */
+ if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
+ return;
+
+ if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
+ pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
+ return;
+
+ /* For ConnectX-4 and ConnectX-4LX, need to check FW support */
+ if (pci_enable_device_mem(pdev)) {
+ dev_warn(&pdev->dev, "Can't enable device memory\n");
+ return;
+ }
+
+ fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
+ if (!fw_ver) {
+ dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
+ goto out;
+ }
+
+ /* Reading from resource space should be 32b aligned */
+ fw_maj_min = ioread32be(fw_ver);
+ fw_sub_min = ioread32be(fw_ver + 1);
+ fw_major = fw_maj_min & 0xffff;
+ fw_minor = fw_maj_min >> 16;
+ fw_subminor = fw_sub_min & 0xffff;
+ if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
+ fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
+ dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
+ fw_major, fw_minor, fw_subminor, pdev->device ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
+ pdev->broken_intx_masking = 1;
+ }
+
+ iounmap(fw_ver);
+
+out:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+ mellanox_check_broken_intx_masking);
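
mellanox_check_broken_intx_masking() reads the first two big-endian words of BAR0 and splits them into major/minor/subminor before applying the ConnectX-4 minor-version thresholds. A user-space sketch of just the decode and the comparison, fed with made-up register values:

#include <stdio.h>
#include <stdint.h>

#define CONNECTX_4_CURR_MAX_MINOR	99
#define CONNECTX_4_INTX_SUPPORT_MINOR	14

/* Decode the two init-segment words the quirk reads with ioread32be(). */
static int fw_supports_intx_masking(uint32_t fw_maj_min, uint32_t fw_sub_min)
{
	uint16_t fw_major    = fw_maj_min & 0xffff;
	uint16_t fw_minor    = fw_maj_min >> 16;
	uint16_t fw_subminor = fw_sub_min & 0xffff;

	printf("FW %d.%d.%d\n", fw_major, fw_minor, fw_subminor);

	/* minor > 99: old version format; minor < 14: too old for INTx masking */
	return !(fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
		 fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR);
}

int main(void)
{
	/* Hypothetical readings: 12.16.1006 supports it, 12.12.780 does not. */
	printf("-> %s\n", fw_supports_intx_masking((16u << 16) | 12u, 1006u)
	       ? "INTx masking OK" : "mark broken_intx_masking");
	printf("-> %s\n", fw_supports_intx_masking((12u << 16) | 12u, 780u)
	       ? "INTx masking OK" : "mark broken_intx_masking");
	return 0;
}
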
static void quirk_no_bus_reset(struct pci_dev *dev)
{
@@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
quirk_thunderbolt_hotplug_msi);
+static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
+{
+ pci_set_vpd_size(dev, 8192);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e09e9b3..73a03d382590 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,7 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
- pci_bridge_d3_device_removed(dev);
+ pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d391b39..b6edb187d160 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
if (res->flags & IORESOURCE_ROM_SHADOW)
return 0;
+ /*
+ * Ideally pci_update_resource() would update the ROM BAR address,
+ * and we would only set the enable bit here. But apparently some
+ * devices have buggy ROM BARs that read as zero when disabled.
+ */
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e341988b..4bc589ee78d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
#include <linux/slab.h>
#include "pci.h"
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
bool disable;
u16 cmd;
u32 new, check, mask;
int reg;
- enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
return;
- }
/*
* Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
return;
pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
- else
+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ } else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ }
- reg = pci_resource_bar(dev, resno, &type);
- if (!reg)
- return;
- if (type != pci_bar_unknown) {
+ if (resno < PCI_ROM_RESOURCE) {
+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+
+ /*
+ * Apparently some Matrox devices have ROM BARs that read
+ * as zero when disabled, so don't update ROM BARs unless
+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
+ */
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
+
+ reg = dev->rom_base_reg;
new |= PCI_ROM_ADDRESS_ENABLE;
- }
+ } else
+ return;
/*
* We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ if (resno <= PCI_ROM_RESOURCE)
+ pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ pci_iov_update_resource(dev, resno);
+#endif
+}
+
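A minimal usage sketch (not part of this patch): callers keep invoking pci_update_resource() for every resource index and the dispatcher above routes it to the right backend; the helper name is hypothetical:

static void example_flush_bars(struct pci_dev *dev)
{
        int i;

        /* standard BARs plus the expansion ROM */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                pci_update_resource(dev, i);

#ifdef CONFIG_PCI_IOV
        /* SR-IOV BARs, which live in the PF's SR-IOV capability */
        for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
                pci_update_resource(dev, i);
#endif
}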
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index b91c4da68365..9bf993e1f71e 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -10,7 +10,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "pci.h"
SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b57294566..6d9335865880 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1084,7 +1084,7 @@ static int arm_pmu_hp_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
- "AP_PERF_ARM_STARTING",
+ "perf/arm/pmu:starting",
arm_perf_starting_cpu, NULL);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 1bb38d0493eb..85d009112864 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -75,12 +75,6 @@ enum bcm2835_pinconf_param {
BCM2835_PINCONF_PARAM_PULL,
};
-enum bcm2835_pinconf_pull {
- BCM2835_PINCONFIG_PULL_NONE,
- BCM2835_PINCONFIG_PULL_DOWN,
- BCM2835_PINCONFIG_PULL_UP,
-};
-
#define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
#define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
#define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index a579126832af..620c231a2889 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -212,7 +212,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+ meson_pmx_disable_other_groups(pc, offset, -1);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index aea310a91821..c9a146948192 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -382,26 +382,21 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
int ret = 0;
u32 pin_reg;
- unsigned long flags;
- bool level_trig;
- u32 active_level;
+ unsigned long flags, irq_flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- * When level_trig is set EDGE and active_level is set HIGH in BIOS
- * default settings, ignore incoming settings from client and use
- * BIOS settings to configure GPIO register.
+ /* Ignore the trigger type requested by the client and instead use
+ * the trigger type that the firmware (ACPI tables) has already
+ * configured for this interrupt
*/
- level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
- active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
- if(level_trig &&
- ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH))
- type = IRQ_TYPE_EDGE_FALLING;
+ irq_flags = irq_get_trigger_type(d->irq);
+ if (irq_flags != IRQ_TYPE_NONE)
+ type = irq_flags;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 12f7d1eb65bc..07409fde02b2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -56,6 +56,17 @@ static const struct samsung_pin_bank_type bank_type_alive = {
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
};
+/* Exynos5433 uses a 4-bit width for its PINCFG_TYPE_DRV bitfields. */
+static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
+ .fld_width = { 4, 1, 2, 4, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
+ .fld_width = { 4, 1, 2, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
static void exynos_irq_mask(struct irq_data *irqd)
{
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -1335,82 +1346,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
- EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
- EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
};
/* pin banks of exynos5433 pin-controller - AUD */
static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
- EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
- EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
- EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
- EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
- EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
};
/* pin banks of exynos5433 pin-controller - IMEM */
static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
- EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
- EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
- EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
- EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
- EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
- EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
- EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
- EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
- EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
- EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
- EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
- EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
- EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
};
/* pin banks of exynos5433 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 5821525a2c84..a473092fb8d2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -90,6 +90,37 @@
.pctl_res_idx = pctl_idx, \
} \
+#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
+ { \
+ .type = &exynos5433_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
+#define EXYNOS5433_PIN_BANK_EINTW(pins, reg, id, offs) \
+ { \
+ .type = &exynos5433_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
+#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
+ { \
+ .type = &exynos5433_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id, \
+ .pctl_res_idx = pctl_idx, \
+ } \
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b8a21d7b25d4..59aa8e302bc3 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -363,6 +363,18 @@ config IDEAPAD_LAPTOP
This is a driver for Lenovo IdeaPad netbooks; it contains drivers for
the rfkill switch, hotkeys, fan control and backlight control.
+config SURFACE3_WMI
+ tristate "Surface 3 WMI Driver"
+ depends on ACPI_WMI
+ depends on DMI
+ depends on INPUT
+ depends on SPI
+ ---help---
+ Say Y here if you have a Surface 3.
+
+ To compile this driver as a module, choose M here: the module will
+ be called surface3-wmi.
+
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
depends on ACPI
@@ -1005,12 +1017,27 @@ config INTEL_PMC_IPC
The PMC is an ARC processor which defines IPC commands for communication
with other entities in the CPU.
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel BXT Whiskey Cove TMU Driver"
+ depends on REGMAP
+ depends on INTEL_SOC_PMIC && INTEL_PMC_IPC
+ ---help---
+ Select this driver to use the Intel BXT Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wake-up functionality in the TMU unit
+ of the Whiskey Cove PMIC.
+
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
depends on ACPI && INPUT
---help---
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
+config SURFACE_3_BUTTON
+ tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
+ depends on ACPI && KEYBOARD_GPIO && I2C
+ ---help---
+ This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
+
config INTEL_PUNIT_IPC
tristate "Intel P-Unit IPC Driver"
---help---
@@ -1027,4 +1054,26 @@ config INTEL_TELEMETRY
used to get various SoC events and parameters
directly via debugfs files. Various tools may use
this interface for SoC state monitoring.
+
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on X86_64
+ ---help---
+ This option enables system support for the Mellanox Technologies
+ platform. Mellanox systems provide data center networking solutions
+ based on Virtual Protocol Interconnect (VPI) technology, enabling
+ seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+ connections.
+
+ If you have a Mellanox system, say Y or M here.
+
+config MLX_CPLD_PLATFORM
+ tristate "Mellanox platform hotplug driver support"
+ default n
+ select HWMON
+ select I2C
+ ---help---
+ This driver handles hot-plug events for the power supplies, power
+ cables and fans on a wide range of Mellanox InfiniBand and Ethernet
+ systems.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2efa86d2a1a7..d4111f0f8a78 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
+obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# toshiba_acpi must link after wmi to ensure that wmi devices are found
@@ -66,8 +67,12 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
+obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
intel_telemetry_debugfs.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
+obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 79d64ea00bfb..a66192f692e3 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -355,6 +355,32 @@ static const struct dmi_system_id acer_blacklist[] __initconst = {
{}
};
+static const struct dmi_system_id amw0_whitelist[] __initconst = {
+ {
+ .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ {
+ .ident = "Gateway",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ },
+ },
+ {
+ .ident = "Packard Bell",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+ },
+ },
+ {}
+};
+
+/*
+ * This quirk table is only for the Acer/Gateway/Packard Bell family of
+ * machines that are supported by the acer-wmi driver.
+ */
static const struct dmi_system_id acer_quirks[] __initconst = {
{
.callback = dmi_matched,
@@ -464,6 +490,17 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
.driver_data = &quirk_acer_travelmate_2490,
},
+ {}
+};
+
+/*
+ * This quirk list is for non-Acer machines that expose AMW0_GUID1 but
+ * were historically handled by acer-wmi. It is kept only for backward
+ * compatibility; please do not add new machines here. Such non-Acer
+ * machines should be supported by their own appropriate WMI drivers.
+ */
+static const struct dmi_system_id non_acer_quirks[] __initconst = {
{
.callback = dmi_matched,
.ident = "Fujitsu Siemens Amilo Li 1718",
@@ -598,6 +635,7 @@ static void __init find_quirks(void)
{
if (!force_series) {
dmi_check_system(acer_quirks);
+ dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
quirks = &quirk_acer_travelmate_2490;
}
@@ -2108,6 +2146,24 @@ static int __init acer_wmi_init(void)
find_quirks();
/*
+ * The AMW0_GUID1 WMI GUID is found not only on the Acer family but
+ * also on other machines from Lenovo, Fujitsu and Medion. In the
+ * past, the acer-wmi driver handled those non-Acer machines through
+ * its quirk list, but in practice the driver was loaded on any
+ * machine exposing AMW0_GUID1. That is undesirable, because such
+ * machines should be handled by their own WMI drivers, e.g.
+ * fujitsu-laptop or ideapad-laptop. So require that a machine with
+ * AMW0_GUID1 is either in the Acer/Gateway/Packard Bell whitelist or
+ * already covered by the legacy quirk list.
+ */
+ if (wmi_has_guid(AMW0_GUID1) &&
+ !dmi_check_system(amw0_whitelist) &&
+ quirks == &quirk_unknown) {
+ pr_err("Unsupported machine has AMW0_GUID1, unable to load\n");
+ return -ENODEV;
+ }
+
+ /*
* Detect which ACPI-WMI interface we're using.
*/
if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 26e4cbc34db8..5be4783e40d4 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -116,8 +116,13 @@ static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
};
+static struct quirk_entry quirk_asus_x550lb = {
+ .xusb2pr = 0x01D9,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
quirks = dmi->driver_data;
return 1;
}
@@ -175,6 +180,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X45U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X456UA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -398,6 +412,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_ux303ub,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+ },
+ .driver_data = &quirk_asus_x550lb,
+ },
{},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ce6ca31a2d09..43cb680adbb4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -156,6 +156,9 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_CTRL_MANUAL 1
#define ASUS_FAN_CTRL_AUTO 2
+#define USB_INTEL_XUSB2PR 0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -1080,6 +1083,29 @@ exit:
return result;
}
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+ struct pci_dev *xhci_pdev;
+ u32 orig_ports_available;
+ u32 ports_available = asus->driver->quirks->xusb2pr;
+
+ xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+ NULL);
+
+ if (!xhci_pdev)
+ return;
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &orig_ports_available);
+
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+ orig_ports_available, ports_available);
+}
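As a design note, pci_get_device() returns a referenced device. A minimal sketch (not part of this patch) of the usual get/write/put pattern, reusing the constants defined above and a hypothetical helper name:

static void example_set_xusb2pr(u32 ports)
{
        struct pci_dev *xhci;

        xhci = pci_get_device(PCI_VENDOR_ID_INTEL,
                              PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI, NULL);
        if (!xhci)
                return;

        pci_write_config_dword(xhci, USB_INTEL_XUSB2PR, ports);
        pci_dev_put(xhci);      /* drop the reference taken by pci_get_device() */
}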
+
/*
* Hwmon device
*/
@@ -2087,6 +2113,9 @@ static int asus_wmi_add(struct platform_device *pdev)
if (asus->driver->quirks->wmi_backlight_native)
acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+ if (asus->driver->quirks->xusb2pr)
+ asus_wmi_set_xusb2pr(asus);
+
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 0e19014e9f54..fdff626c3b51 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -53,6 +53,7 @@ struct quirk_entry {
* and let the ACPI interrupt to send out the key event.
*/
int no_display_toggle;
+ u32 xusb2pr;
bool (*i8042_filter)(unsigned char data, unsigned char str,
struct serio *serio);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2c2f02b2e08a..14392a01ab36 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1904,38 +1904,40 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
return 0;
}
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
struct kbd_state state;
struct kbd_state new_state;
u16 num;
+ int ret;
if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
+ ret = kbd_set_level(&new_state, value);
+ if (ret)
+ return ret;
+ return kbd_set_state_safe(&new_state, &state);
}
if (kbd_get_valid_token_counts()) {
for (num = kbd_token_bits; num != 0 && value > 0; --value)
num &= num - 1; /* clear the first bit set */
if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
+ return 0;
+ return kbd_set_token_bit(ffs(num) - 1);
}
pr_warn("Keyboard brightness level control not supported\n");
+ return -ENXIO;
}
static struct led_classdev kbd_led = {
.name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
+ .brightness_set_blocking = kbd_led_level_set,
.brightness_get = kbd_led_level_get,
.groups = kbd_led_groups,
};
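The conversion above (and the fujitsu-laptop one further below) relies on the LED core's brightness_set_blocking callback, which may sleep and reports failures via a negative errno. A minimal sketch (not part of this patch); example_hw_write() is a hypothetical stand-in for an SMBIOS/EC call:

static int example_hw_write(int on)
{
        return 0;       /* hypothetical hardware accessor */
}

static int example_led_set(struct led_classdev *cdev,
                           enum led_brightness value)
{
        /* may sleep; a negative errno is propagated by the LED core */
        return example_hw_write(value != LED_OFF);
}

static struct led_classdev example_led = {
        .name                    = "example::led",
        .brightness_set_blocking = example_led_set,
};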
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index da2fe18162e1..75e637047d36 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -114,7 +114,7 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
{ KE_IGNORE, 0xe00e, { KEY_RESERVED } },
/* Wifi Catcher */
- { KE_KEY, 0xe011, { KEY_PROG2 } },
+ { KE_KEY, 0xe011, { KEY_WLAN } },
/* Ambient light sensor toggle */
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
@@ -274,6 +274,16 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/* Stealth mode toggle */
{ KE_IGNORE, 0x155, { KEY_RESERVED } },
+
+ /* Rugged magnetic dock attach/detach events */
+ { KE_IGNORE, 0x156, { KEY_RESERVED } },
+ { KE_IGNORE, 0x157, { KEY_RESERVED } },
+
+ /* Rugged programmable (P1/P2/P3 keys) */
+ { KE_KEY, 0x850, { KEY_PROG1 } },
+ { KE_KEY, 0x851, { KEY_PROG2 } },
+ { KE_KEY, 0x852, { KEY_PROG3 } },
+
};
/*
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 61f39abf5dc8..82d67715ce76 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -177,43 +177,43 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev logolamp_led = {
.name = "fujitsu::logolamp",
.brightness_get = logolamp_get,
- .brightness_set = logolamp_set
+ .brightness_set_blocking = logolamp_set
};
static enum led_brightness kblamps_get(struct led_classdev *cdev);
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev kblamps_led = {
.name = "fujitsu::kblamps",
.brightness_get = kblamps_get,
- .brightness_set = kblamps_set
+ .brightness_set_blocking = kblamps_set
};
static enum led_brightness radio_led_get(struct led_classdev *cdev);
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev radio_led = {
.name = "fujitsu::radio_led",
.brightness_get = radio_led_get,
- .brightness_set = radio_led_set
+ .brightness_set_blocking = radio_led_set
};
static enum led_brightness eco_led_get(struct led_classdev *cdev);
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev eco_led = {
.name = "fujitsu::eco_led",
.brightness_get = eco_led_get,
- .brightness_set = eco_led_set
+ .brightness_set_blocking = eco_led_set
};
#endif
@@ -267,48 +267,48 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* LED class callbacks */
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL) {
call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
} else if (brightness >= LED_HALF) {
call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
} else {
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
}
}
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
else
- call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
}
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
else
- call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+ return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
}
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
int curr;
curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
if (brightness >= LED_FULL)
- call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
else
- call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
}
static enum led_brightness logolamp_get(struct led_classdev *cdev)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a7614fc542b5..410741acb3c9 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -871,6 +871,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ACZ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 12dbb5063376..cb3ab2b212b1 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -69,7 +69,7 @@ static int intel_hid_set_enable(struct device *device, int enable)
arg0.integer.value = enable;
status = acpi_evaluate_object(ACPI_HANDLE(device), "HDSM", &args, NULL);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(device, "failed to %sable hotkeys\n",
enable ? "en" : "dis");
return -EIO;
@@ -148,7 +148,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
}
status = acpi_evaluate_integer(handle, "HDEM", NULL, &ev_index);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to get event index\n");
return;
}
@@ -167,7 +167,7 @@ static int intel_hid_probe(struct platform_device *device)
int err;
status = acpi_evaluate_integer(handle, "HDMM", NULL, &mode);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to read mode\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 04cf5dffdfd9..bbe4c06c769f 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -29,7 +29,7 @@ static int smartconnect_acpi_init(struct acpi_device *acpi)
acpi_status status;
status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
if (value & 0x1) {
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 78080763df51..554e82ebe83c 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -49,34 +49,19 @@ static int intel_vbtn_input_setup(struct platform_device *device)
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
int ret;
- priv->input_dev = input_allocate_device();
+ priv->input_dev = devm_input_allocate_device(&device->dev);
if (!priv->input_dev)
return -ENOMEM;
ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
if (ret)
- goto err_free_device;
+ return ret;
priv->input_dev->dev.parent = &device->dev;
priv->input_dev->name = "Intel Virtual Button driver";
priv->input_dev->id.bustype = BUS_HOST;
- ret = input_register_device(priv->input_dev);
- if (ret)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- input_free_device(priv->input_dev);
- return ret;
-}
-
-static void intel_vbtn_input_destroy(struct platform_device *device)
-{
- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
-
- input_unregister_device(priv->input_dev);
+ return input_register_device(priv->input_dev);
}
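The switch to devm_input_allocate_device() ties the input device's lifetime to the platform device, so the explicit free/unregister paths could be dropped. A minimal sketch (not part of this patch) with hypothetical names:

static int example_input_setup(struct platform_device *pdev)
{
        struct input_dev *input = devm_input_allocate_device(&pdev->dev);

        if (!input)
                return -ENOMEM;

        input->name = "example buttons";
        input->id.bustype = BUS_HOST;

        /* unregistered and freed automatically when the driver detaches */
        return input_register_device(input);
}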
static void notify_handler(acpi_handle handle, u32 event, void *context)
@@ -97,7 +82,7 @@ static int intel_vbtn_probe(struct platform_device *device)
int err;
status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
- if (!ACPI_SUCCESS(status)) {
+ if (ACPI_FAILURE(status)) {
dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
return -ENODEV;
}
@@ -117,24 +102,16 @@ static int intel_vbtn_probe(struct platform_device *device)
ACPI_DEVICE_NOTIFY,
notify_handler,
device);
- if (ACPI_FAILURE(status)) {
- err = -EBUSY;
- goto err_remove_input;
- }
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
return 0;
-
-err_remove_input:
- intel_vbtn_input_destroy(device);
-
- return err;
}
static int intel_vbtn_remove(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
- intel_vbtn_input_destroy(device);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
/*
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
new file mode 100644
index 000000000000..e202abd5b0df
--- /dev/null
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -0,0 +1,162 @@
+/*
+ * intel_bxtwc_tmu.c - Intel BXT Whiskey Cove PMIC TMU driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This driver adds TMU (Time Management Unit) support for the Intel BXT platform.
+ * It enables the alarm wake-up functionality in the TMU unit of Whiskey Cove
+ * PMIC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_TMUIRQ 0x4fb6
+#define BXTWC_MIRQLVL1 0x4e0e
+#define BXTWC_MTMUIRQ_REG 0x4fb7
+#define BXTWC_MIRQLVL1_MTMU BIT(1)
+#define BXTWC_TMU_WK_ALRM BIT(1)
+#define BXTWC_TMU_SYS_ALRM BIT(2)
+#define BXTWC_TMU_ALRM_MASK (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+#define BXTWC_TMU_ALRM_IRQ (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+
+struct wcove_tmu {
+ int irq;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
+{
+ struct wcove_tmu *wctmu = data;
+ unsigned int tmu_irq;
+
+ /* Read TMU interrupt reg */
+ regmap_read(wctmu->regmap, BXTWC_TMUIRQ, &tmu_irq);
+ if (tmu_irq & BXTWC_TMU_ALRM_IRQ) {
+ /* clear TMU irq */
+ regmap_write(wctmu->regmap, BXTWC_TMUIRQ, tmu_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct wcove_tmu *wctmu;
+ int ret, virq, irq;
+
+ wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
+ if (!wctmu)
+ return -ENOMEM;
+
+ wctmu->dev = &pdev->dev;
+ wctmu->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "invalid irq %d\n", irq);
+ return irq;
+ }
+
+ regmap_irq_chip = pmic->irq_chip_data_tmu;
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get virtual interrupt=%d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, bxt_wcove_tmu_irq_handler,
+ IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
+ ret, virq);
+ return ret;
+ }
+ wctmu->irq = virq;
+
+ /* Enable TMU interrupts */
+ regmap_update_bits(wctmu->regmap, BXTWC_MIRQLVL1,
+ BXTWC_MIRQLVL1_MTMU, 0);
+
+ /* Unmask TMU second level Wake & System alarm */
+ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ BXTWC_TMU_ALRM_MASK, 0);
+
+ platform_set_drvdata(pdev, wctmu);
+ return 0;
+}
+
+static int bxt_wcove_tmu_remove(struct platform_device *pdev)
+{
+ struct wcove_tmu *wctmu = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ /* Mask TMU interrupts */
+ regmap_read(wctmu->regmap, BXTWC_MIRQLVL1, &val);
+ regmap_write(wctmu->regmap, BXTWC_MIRQLVL1,
+ val | BXTWC_MIRQLVL1_MTMU);
+ regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val);
+ regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ val | BXTWC_TMU_ALRM_MASK);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bxtwc_tmu_suspend(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ enable_irq_wake(wctmu->irq);
+ return 0;
+}
+
+static int bxtwc_tmu_resume(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ disable_irq_wake(wctmu->irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bxtwc_tmu_pm_ops, bxtwc_tmu_suspend, bxtwc_tmu_resume);
+
+static const struct platform_device_id bxt_wcove_tmu_id_table[] = {
+ { .name = "bxt_wcove_tmu" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table);
+
+static struct platform_driver bxt_wcove_tmu_driver = {
+ .probe = bxt_wcove_tmu_probe,
+ .remove = bxt_wcove_tmu_remove,
+ .driver = {
+ .name = "bxt_wcove_tmu",
+ .pm = &bxtwc_tmu_pm_ops,
+ },
+ .id_table = bxt_wcove_tmu_id_table,
+};
+
+module_platform_driver(bxt_wcove_tmu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nilesh Bacchewar <nilesh.bacchewar@intel.com>");
+MODULE_DESCRIPTION("BXT Whiskey Cove TMU Driver");
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b832ba3..0df3c9d37509 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index)
return td_info;
}
+#ifdef CONFIG_PM_SLEEP
/**
* mid_thermal_resume - resume routine
* @dev: device structure
@@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev)
*/
return configure_adc(0);
}
+#endif
static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
mid_thermal_suspend, mid_thermal_resume);
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index e8b1b836ca2d..b130b8c9b9d7 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -19,10 +19,12 @@
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -32,16 +34,106 @@
static struct pmc_dev pmc;
+static const struct pmc_bit_map spt_pll_map[] = {
+ {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
+ {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
+ {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
+ {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
+ {},
+};
+
+static const struct pmc_bit_map spt_mphy_map[] = {
+ {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
+ {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
+ {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
+ {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
+ {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
+ {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
+ {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
+ {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
+ {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
+ {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
+ {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
+ {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
+ {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
+ {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
+ {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
+ {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
+ {},
+};
+
+static const struct pmc_bit_map spt_pfear_map[] = {
+ {"PMC", SPT_PMC_BIT_PMC},
+ {"OPI-DMI", SPT_PMC_BIT_OPI},
+ {"SPI / eSPI", SPT_PMC_BIT_SPI},
+ {"XHCI", SPT_PMC_BIT_XHCI},
+ {"SPA", SPT_PMC_BIT_SPA},
+ {"SPB", SPT_PMC_BIT_SPB},
+ {"SPC", SPT_PMC_BIT_SPC},
+ {"GBE", SPT_PMC_BIT_GBE},
+ {"SATA", SPT_PMC_BIT_SATA},
+ {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
+ {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
+ {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
+ {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
+ {"RSVD", SPT_PMC_BIT_RSVD_0B},
+ {"LPSS", SPT_PMC_BIT_LPSS},
+ {"LPC", SPT_PMC_BIT_LPC},
+ {"SMB", SPT_PMC_BIT_SMB},
+ {"ISH", SPT_PMC_BIT_ISH},
+ {"P2SB", SPT_PMC_BIT_P2SB},
+ {"DFX", SPT_PMC_BIT_DFX},
+ {"SCC", SPT_PMC_BIT_SCC},
+ {"RSVD", SPT_PMC_BIT_RSVD_0C},
+ {"FUSE", SPT_PMC_BIT_FUSE},
+ {"CAMERA", SPT_PMC_BIT_CAMREA},
+ {"RSVD", SPT_PMC_BIT_RSVD_0D},
+ {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
+ {"EXI", SPT_PMC_BIT_EXI},
+ {"CSE", SPT_PMC_BIT_CSE},
+ {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
+ {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
+ {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
+ {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
+ {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
+ {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
+ {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
+ {"RSVD", SPT_PMC_BIT_RSVD_1A},
+ {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
+ {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
+ {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
+ {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
+ {},
+};
+
+static const struct pmc_reg_map spt_reg_map = {
+ .pfear_sts = spt_pfear_map,
+ .mphy_sts = spt_mphy_map,
+ .pll_sts = spt_pll_map,
+};
+
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID),
+ (kernel_ulong_t)&spt_reg_map },
{ 0, },
};
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
+ reg_offset, u32 val)
+{
+ writel(val, pmcdev->regbase + reg_offset);
+}
+
static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
{
return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
@@ -90,6 +182,245 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
+static int pmc_core_check_read_lock_bit(void)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET);
+ return test_bit(SPT_PMC_READ_DISABLE_BIT,
+ (unsigned long *)&value);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static void pmc_core_display_map(struct seq_file *s, int index,
+ u8 pf_reg, const struct pmc_bit_map *pf_map)
+{
+ seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
+ index, pf_map[index].name,
+ pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ u8 pf_regs[NUM_ENTRIES];
+ int index, iter;
+
+ iter = SPT_PMC_XRAM_PPFEAR0A;
+
+ for (index = 0; index < NUM_ENTRIES; index++, iter++)
+ pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
+
+ for (index = 0; map[index].name; index++)
+ pmc_core_display_map(s, index, pf_regs[index / 8], map);
+
+ return 0;
+}
+
+static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ppfear_ops = {
+ .open = pmc_core_ppfear_sts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
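Each PPFEAR byte read above packs eight IP power-gating bits, which is why the show routine indexes pf_regs[index / 8] against the map entry's per-byte bit_mask. A one-line restatement (not part of this patch) with a hypothetical helper name:

/* True when IP entry n reads as "Off" (power gated) in the PPFEAR bytes. */
static bool example_ip_off(const u8 *pf_regs, const struct pmc_bit_map *map,
                           int n)
{
        return pf_regs[n / 8] & map[n].bit_mask;
}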
+
+/* Return the MTPMC link status; 0 means the link is ready */
+static int pmc_core_mtpmc_link_status(void)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
+ return test_bit(SPT_PMC_MSG_FULL_STS_BIT,
+ (unsigned long *)&value);
+}
+
+static int pmc_core_send_msg(u32 *addr_xram)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 dest;
+ int timeout;
+
+ for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
+ if (pmc_core_mtpmc_link_status() == 0)
+ break;
+ msleep(5);
+ }
+
+ if (timeout <= 0 && pmc_core_mtpmc_link_status())
+ return -EBUSY;
+
+ dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
+ pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
+ return 0;
+}
+
+static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
+ u32 mphy_core_reg_low, mphy_core_reg_high;
+ u32 val_low, val_high;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
+ mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
+
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name && index < 8; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_low ? "Not power gated" :
+ "Power gated");
+ }
+
+ for (index = 8; map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_high ? "Not power gated" :
+ "Power gated");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_mphy_pg_ops = {
+ .open = pmc_core_mphy_pg_sts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_core_pll_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->pll_sts;
+ u32 mphy_common_reg, val;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
+ msleep(10);
+ val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name ; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val ? "Active" : "Idle");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static int pmc_core_pll_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_pll_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_pll_ops = {
+ .open = pmc_core_pll_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
+*userbuf, size_t count, loff_t *ppos)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 val, buf_size, fd;
+ int err = 0;
+
+ buf_size = count < 64 ? count : 64;
+ mutex_lock(&pmcdev->lock);
+
+ if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (val > NUM_IP_IGN_ALLOWED) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ fd = pmc_core_reg_read(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET);
+ fd |= (1U << val);
+ pmc_core_reg_write(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET, fd);
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err == 0 ? count : err;
+}
+
+static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ltr_ignore_ops = {
+ .open = pmc_core_ltr_ignore_open,
+ .read = seq_read,
+ .write = pmc_core_ltr_ignore_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -106,20 +437,59 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev->dbgfs_dir = dir;
file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
dir, pmcdev, &pmc_core_dev_state);
+ if (!file)
+ goto err;
- if (!file) {
- pmc_core_dbgfs_unregister(pmcdev);
- return -ENODEV;
- }
+ file = debugfs_create_file("pch_ip_power_gating_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_ppfear_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("mphy_core_lanes_power_gating_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_mphy_pg_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("pll_status",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_pll_ops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("ltr_ignore",
+ S_IFREG | S_IRUGO, dir, pmcdev,
+ &pmc_core_ltr_ignore_ops);
+
+ if (!file)
+ goto err;
return 0;
+err:
+ pmc_core_dbgfs_unregister(pmcdev);
+ return -ENODEV;
}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
(kernel_ulong_t)NULL},
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
(kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_MOBILE, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
{}
};
@@ -128,6 +498,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct device *ptr_dev = &dev->dev;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
+ const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data;
int err;
cpu_id = x86_match_cpu(intel_pmc_core_ids);
@@ -149,6 +520,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
return err;
}
+ pmcdev->base_addr &= PMC_BASE_ADDR_MASK;
dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
@@ -159,6 +531,10 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
}
+ mutex_init(&pmcdev->lock);
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ pmcdev->map = map;
+
err = pmc_core_dbgfs_register(pmcdev);
if (err < 0)
dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index e3f671f4d122..5a48e7728479 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -26,8 +26,111 @@
#define SPT_PMC_BASE_ADDR_OFFSET 0x48
#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
-#define SPT_PMC_MMIO_REG_LEN 0x100
+#define SPT_PMC_PM_CFG_OFFSET 0x18
+#define SPT_PMC_PM_STS_OFFSET 0x1c
+#define SPT_PMC_MTPMC_OFFSET 0x20
+#define SPT_PMC_MFPMC_OFFSET 0x38
+#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
+#define SPT_PMC_MPHY_CORE_STS_0 0x1143
+#define SPT_PMC_MPHY_CORE_STS_1 0x1142
+#define SPT_PMC_MPHY_COM_STS_0 0x1155
+#define SPT_PMC_MMIO_REG_LEN 0x1000
#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
+#define MTPMC_MASK 0xffff0000
+#define NUM_ENTRIES 5
+#define SPT_PMC_READ_DISABLE_BIT 0x16
+#define SPT_PMC_MSG_FULL_STS_BIT 0x18
+#define NUM_RETRIES 100
+#define NUM_IP_IGN_ALLOWED 17
+
+/* Sunrise Point: PGD PFET Enable Ack Status Registers */
+enum ppfear_regs {
+ SPT_PMC_XRAM_PPFEAR0A = 0x590,
+ SPT_PMC_XRAM_PPFEAR0B,
+ SPT_PMC_XRAM_PPFEAR0C,
+ SPT_PMC_XRAM_PPFEAR0D,
+ SPT_PMC_XRAM_PPFEAR1A,
+};
+
+#define SPT_PMC_BIT_PMC BIT(0)
+#define SPT_PMC_BIT_OPI BIT(1)
+#define SPT_PMC_BIT_SPI BIT(2)
+#define SPT_PMC_BIT_XHCI BIT(3)
+#define SPT_PMC_BIT_SPA BIT(4)
+#define SPT_PMC_BIT_SPB BIT(5)
+#define SPT_PMC_BIT_SPC BIT(6)
+#define SPT_PMC_BIT_GBE BIT(7)
+
+#define SPT_PMC_BIT_SATA BIT(0)
+#define SPT_PMC_BIT_HDA_PGD0 BIT(1)
+#define SPT_PMC_BIT_HDA_PGD1 BIT(2)
+#define SPT_PMC_BIT_HDA_PGD2 BIT(3)
+#define SPT_PMC_BIT_HDA_PGD3 BIT(4)
+#define SPT_PMC_BIT_RSVD_0B BIT(5)
+#define SPT_PMC_BIT_LPSS BIT(6)
+#define SPT_PMC_BIT_LPC BIT(7)
+
+#define SPT_PMC_BIT_SMB BIT(0)
+#define SPT_PMC_BIT_ISH BIT(1)
+#define SPT_PMC_BIT_P2SB BIT(2)
+#define SPT_PMC_BIT_DFX BIT(3)
+#define SPT_PMC_BIT_SCC BIT(4)
+#define SPT_PMC_BIT_RSVD_0C BIT(5)
+#define SPT_PMC_BIT_FUSE BIT(6)
+#define SPT_PMC_BIT_CAMREA BIT(7)
+
+#define SPT_PMC_BIT_RSVD_0D BIT(0)
+#define SPT_PMC_BIT_USB3_OTG BIT(1)
+#define SPT_PMC_BIT_EXI BIT(2)
+#define SPT_PMC_BIT_CSE BIT(3)
+#define SPT_PMC_BIT_CSME_KVM BIT(4)
+#define SPT_PMC_BIT_CSME_PMT BIT(5)
+#define SPT_PMC_BIT_CSME_CLINK BIT(6)
+#define SPT_PMC_BIT_CSME_PTIO BIT(7)
+
+#define SPT_PMC_BIT_CSME_USBR BIT(0)
+#define SPT_PMC_BIT_CSME_SUSRAM BIT(1)
+#define SPT_PMC_BIT_CSME_SMT BIT(2)
+#define SPT_PMC_BIT_RSVD_1A BIT(3)
+#define SPT_PMC_BIT_CSME_SMS2 BIT(4)
+#define SPT_PMC_BIT_CSME_SMS1 BIT(5)
+#define SPT_PMC_BIT_CSME_RTC BIT(6)
+#define SPT_PMC_BIT_CSME_PSF BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE3 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE4 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE5 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE6 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE7 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE8 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE9 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE10 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE11 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE12 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE13 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE14 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE15 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_CMN_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_CMN_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map *mphy_sts;
+ const struct pmc_bit_map *pll_sts;
+};
/**
* struct pmc_dev - pmc device structure
@@ -43,8 +146,13 @@
struct pmc_dev {
u32 base_addr;
void __iomem *regbase;
+ const struct pmc_reg_map *map;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
bool has_slp_s0_res;
+ int pmc_xram_read_bit;
+ struct mutex lock; /* generic mutex lock for PMC Core */
};
#endif /* PMC_CORE_H */
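The reg-map plumbing added above means a future PCH generation only needs its own pmc_bit_map tables plus a pci_device_id entry carrying the map in driver_data. A partial sketch (not part of this patch) with hypothetical names, a made-up device ID, and only the pfear map filled in:

static const struct pmc_bit_map example_pfear_map[] = {
        {"EXAMPLE-IP", BIT(0)},
        {},
};

static const struct pmc_reg_map example_reg_map = {
        .pfear_sts = example_pfear_map,
};

static const struct pci_device_id example_pmc_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x1234), (kernel_ulong_t)&example_reg_map },
        { 0, },
};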
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
new file mode 100644
index 000000000000..97b4c3a219c0
--- /dev/null
+++ b/drivers/platform/x86/mlx-platform.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-mux-reg.h>
+#include <linux/platform_data/mlxcpld-hotplug.h>
+
+#define MLX_PLAT_DEVICE_NAME "mlxplat"
+
+/* LPC bus IO offsets */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
+#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
+#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
+#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
+#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
+#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
+
+/* Start channel numbers */
+#define MLXPLAT_CPLD_CH1 2
+#define MLXPLAT_CPLD_CH2 10
+
+/* Number of LPC attached MUX platform devices */
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+
+/* mlxplat_priv - platform private data
+ * @pdev_i2c - i2c controller platform device
+ * @pdev_mux - array of mux platform devices
+ * @pdev_hotplug - hotplug platform device
+ */
+struct mlxplat_priv {
+ struct platform_device *pdev_i2c;
+ struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
+ struct platform_device *pdev_hotplug;
+};
+
+/* Regions for LPC I2C controller and LPC base register space */
+static const struct resource mlxplat_lpc_resources[] = {
+ [0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
+ [1] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_REG_BASE_ADRR,
+ MLXPLAT_CPLD_LPC_IO_RANGE,
+ "mlxplat_cpld_lpc_regs",
+ IORESOURCE_IO),
+};
+
+/* Platform default channels */
+static const int mlxplat_default_channels[][8] = {
+ {
+ MLXPLAT_CPLD_CH1, MLXPLAT_CPLD_CH1 + 1, MLXPLAT_CPLD_CH1 + 2,
+ MLXPLAT_CPLD_CH1 + 3, MLXPLAT_CPLD_CH1 + 4, MLXPLAT_CPLD_CH1 +
+ 5, MLXPLAT_CPLD_CH1 + 6, MLXPLAT_CPLD_CH1 + 7
+ },
+ {
+ MLXPLAT_CPLD_CH2, MLXPLAT_CPLD_CH2 + 1, MLXPLAT_CPLD_CH2 + 2,
+ MLXPLAT_CPLD_CH2 + 3, MLXPLAT_CPLD_CH2 + 4, MLXPLAT_CPLD_CH2 +
+ 5, MLXPLAT_CPLD_CH2 + 6, MLXPLAT_CPLD_CH2 + 7
+ },
+};
+
+/* Platform channels for MSN21xx system family */
+static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+/* Platform mux data */
+static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
+/* Platform hotplug devices */
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_psu[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c02", 0x51) },
+ .bus = 10,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c02", 0x50) },
+ .bus = 10,
+ },
+};
+
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_pwr[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("dps460", 0x59) },
+ .bus = 10,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("dps460", 0x58) },
+ .bus = 10,
+ },
+};
+
+static struct mlxcpld_hotplug_device mlxplat_mlxcpld_hotplug_fan[] = {
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 11,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 12,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 13,
+ },
+ {
+ .brdinfo = { I2C_BOARD_INFO("24c32", 0x50) },
+ .bus = 14,
+ },
+};
+
+/* Platform hotplug default data */
+static
+struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_default_data = {
+ .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a),
+ .top_aggr_mask = 0x48,
+ .top_aggr_psu_mask = 0x08,
+ .psu_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x58),
+ .psu_mask = 0x03,
+ .psu_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_psu),
+ .psu = mlxplat_mlxcpld_hotplug_psu,
+ .top_aggr_pwr_mask = 0x08,
+ .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64),
+ .pwr_mask = 0x03,
+ .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr),
+ .pwr = mlxplat_mlxcpld_hotplug_pwr,
+ .top_aggr_fan_mask = 0x40,
+ .fan_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x88),
+ .fan_mask = 0x0f,
+ .fan_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_fan),
+ .fan = mlxplat_mlxcpld_hotplug_fan,
+};
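Note how the default aggregation values above compose: .top_aggr_mask = 0x48 is exactly the union of the FAN aggregation bit (0x40) and the shared PSU/power aggregation bit (0x08), so the aggregated interrupt covers precisely the three event sources configured here.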
+
+/* Platform hotplug MSN21xx system family data */
+static
+struct mlxcpld_hotplug_platform_data mlxplat_mlxcpld_hotplug_msn21xx_data = {
+ .top_aggr_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x3a),
+ .top_aggr_mask = 0x04,
+ .top_aggr_pwr_mask = 0x04,
+ .pwr_reg_offset = (MLXPLAT_CPLD_LPC_REG_BASE_ADRR | 0x64),
+ .pwr_mask = 0x03,
+ .pwr_count = ARRAY_SIZE(mlxplat_mlxcpld_hotplug_pwr),
+};
+
+static struct resource mlxplat_mlxcpld_hotplug_resources[] = {
+ [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"),
+};
+
+static struct platform_device *mlxplat_dev;
+static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
+
+static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_default_channels[i];
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_default_channels[i]);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_default_data;
+
+ return 1;
+};
+
+static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_hotplug_msn21xx_data;
+
+ return 1;
+};
+
+static struct dmi_system_id mlxplat_dmi_table[] __initdata = {
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN24"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN27"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSB"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_default_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSX"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_msn21xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
+ },
+ },
+ { }
+};
+
+static int __init mlxplat_init(void)
+{
+ struct mlxplat_priv *priv;
+ int i, err;
+
+ if (!dmi_check_system(mlxplat_dmi_table))
+ return -ENODEV;
+
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
+ mlxplat_lpc_resources,
+ ARRAY_SIZE(mlxplat_lpc_resources));
+
+ if (IS_ERR(mlxplat_dev))
+ return PTR_ERR(mlxplat_dev);
+
+ priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+ platform_set_drvdata(mlxplat_dev, priv);
+
+ priv->pdev_i2c = platform_device_register_simple("i2c_mlxcpld", -1,
+ NULL, 0);
+ if (IS_ERR(priv->pdev_i2c)) {
+ err = PTR_ERR(priv->pdev_i2c);
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ priv->pdev_mux[i] = platform_device_register_resndata(
+ &mlxplat_dev->dev,
+ "i2c-mux-reg", i, NULL,
+ 0, &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
+ if (IS_ERR(priv->pdev_mux[i])) {
+ err = PTR_ERR(priv->pdev_mux[i]);
+ goto fail_platform_mux_register;
+ }
+ }
+
+ priv->pdev_hotplug = platform_device_register_resndata(
+ &mlxplat_dev->dev, "mlxcpld-hotplug", -1,
+ mlxplat_mlxcpld_hotplug_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_hotplug_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
+
+ return 0;
+
+fail_platform_mux_register:
+ for (i--; i >= 0; i--)
+ platform_device_unregister(priv->pdev_mux[i]);
+ platform_device_unregister(priv->pdev_i2c);
+fail_alloc:
+ platform_device_unregister(mlxplat_dev);
+
+ return err;
+}
+module_init(mlxplat_init);
+
+static void __exit mlxplat_exit(void)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ int i;
+
+ platform_device_unregister(priv->pdev_hotplug);
+
+ for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ platform_device_unregister(priv->pdev_mux[i]);
+
+ platform_device_unregister(priv->pdev_i2c);
+ platform_device_unregister(mlxplat_dev);
+}
+module_exit(mlxplat_exit);
+
+MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN24*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN27*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSB*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSX*:");
+MODULE_ALIAS("dmi:*:*Mellanox*:MSN21*:");
diff --git a/drivers/platform/x86/mlxcpld-hotplug.c b/drivers/platform/x86/mlxcpld-hotplug.c
new file mode 100644
index 000000000000..aff3686b3b37
--- /dev/null
+++ b/drivers/platform/x86/mlxcpld-hotplug.c
@@ -0,0 +1,515 @@
+/*
+ * drivers/platform/x86/mlxcpld-hotplug.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld-hotplug.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Offset of event and mask registers from status register */
+#define MLXCPLD_HOTPLUG_EVENT_OFF 1
+#define MLXCPLD_HOTPLUG_MASK_OFF 2
+#define MLXCPLD_HOTPLUG_AGGR_MASK_OFF 1
+
+#define MLXCPLD_HOTPLUG_ATTRS_NUM 8
+
+/**
+ * enum mlxcpld_hotplug_attr_type - sysfs attributes for hotplug events:
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PSU: power supply unit attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PWR: power cable attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_FAN: FAN drawer attribute;
+ */
+enum mlxcpld_hotplug_attr_type {
+ MLXCPLD_HOTPLUG_ATTR_TYPE_PSU,
+ MLXCPLD_HOTPLUG_ATTR_TYPE_PWR,
+ MLXCPLD_HOTPLUG_ATTR_TYPE_FAN,
+};
+
+/**
+ * struct mlxcpld_hotplug_priv_data - platform private data:
+ * @irq: platform interrupt number;
+ * @pdev: platform device;
+ * @plat: platform data;
+ * @hwmon: hwmon device;
+ * @mlxcpld_hotplug_attr: sysfs attributes array;
+ * @mlxcpld_hotplug_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute group for hwmon registration;
+ * @dwork: delayed work template;
+ * @lock: spin lock;
+ * @aggr_cache: last value of aggregation register status;
+ * @psu_cache: last value of PSU register status;
+ * @pwr_cache: last value of power register status;
+ * @fan_cache: last value of FAN register status;
+ */
+struct mlxcpld_hotplug_priv_data {
+ int irq;
+ struct platform_device *pdev;
+ struct mlxcpld_hotplug_platform_data *plat;
+ struct device *hwmon;
+ struct attribute *mlxcpld_hotplug_attr[MLXCPLD_HOTPLUG_ATTRS_NUM + 1];
+ struct sensor_device_attribute_2
+ mlxcpld_hotplug_dev_attr[MLXCPLD_HOTPLUG_ATTRS_NUM];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct delayed_work dwork;
+ spinlock_t lock;
+ u8 aggr_cache;
+ u8 psu_cache;
+ u8 pwr_cache;
+ u8 fan_cache;
+};
+
+static ssize_t mlxcpld_hotplug_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+ int index = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 reg_val = 0;
+
+ switch (nr) {
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_PSU:
+ /* Bit = 0 : PSU is present. */
+ reg_val = !(inb(priv->plat->psu_reg_offset) & BIT(index));
+ break;
+
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_PWR:
+ /* Bit = 1 : power cable is attached. */
+ reg_val = !!(inb(priv->plat->pwr_reg_offset) & BIT(index %
+ priv->plat->pwr_count));
+ break;
+
+ case MLXCPLD_HOTPLUG_ATTR_TYPE_FAN:
+ /* Bit = 0 : FAN is present. */
+ reg_val = !(inb(priv->plat->fan_reg_offset) & BIT(index %
+ priv->plat->fan_count));
+ break;
+ }
+
+ return sprintf(buf, "%u\n", reg_val);
+}
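A worked example of the decoding above, using the default platform data from mlx-platform.c (.psu_mask = 0x03): if the CPLD PSU status register reads 0x02, bit 0 is clear and psu1 reports 1 (present), while bit 1 is set and psu2 reports 0 (absent); the power-cable attributes use the opposite, non-inverted convention.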
+
+#define PRIV_ATTR(i) priv->mlxcpld_hotplug_attr[i]
+#define PRIV_DEV_ATTR(i) priv->mlxcpld_hotplug_dev_attr[i]
+static int mlxcpld_hotplug_attr_init(struct mlxcpld_hotplug_priv_data *priv)
+{
+ int num_attrs = priv->plat->psu_count + priv->plat->pwr_count +
+ priv->plat->fan_count;
+ int i;
+
+ priv->group.attrs = devm_kzalloc(&priv->pdev->dev, num_attrs *
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!priv->group.attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_attrs; i++) {
+ PRIV_ATTR(i) = &PRIV_DEV_ATTR(i).dev_attr.attr;
+
+ if (i < priv->plat->psu_count) {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "psu%u", i + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PSU;
+ } else if (i < priv->plat->psu_count + priv->plat->pwr_count) {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "pwr%u", i %
+ priv->plat->pwr_count + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PWR;
+ } else {
+ PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+ GFP_KERNEL, "fan%u", i %
+ priv->plat->fan_count + 1);
+ PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_FAN;
+ }
+
+ if (!PRIV_ATTR(i)->name) {
+ dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+ i + 1);
+ return -ENOMEM;
+ }
+
+ PRIV_DEV_ATTR(i).dev_attr.attr.name = PRIV_ATTR(i)->name;
+ PRIV_DEV_ATTR(i).dev_attr.attr.mode = S_IRUGO;
+ PRIV_DEV_ATTR(i).dev_attr.show = mlxcpld_hotplug_attr_show;
+ PRIV_DEV_ATTR(i).index = i;
+ sysfs_attr_init(&PRIV_DEV_ATTR(i).dev_attr.attr);
+ }
+
+ priv->group.attrs = priv->mlxcpld_hotplug_attr;
+ priv->groups[0] = &priv->group;
+ priv->groups[1] = NULL;
+
+ return 0;
+}
+
+static int mlxcpld_hotplug_device_create(struct device *dev,
+ struct mlxcpld_hotplug_device *item)
+{
+ item->adapter = i2c_get_adapter(item->bus);
+ if (!item->adapter) {
+ dev_err(dev, "Failed to get adapter for bus %d\n",
+ item->bus);
+ return -EFAULT;
+ }
+
+ item->client = i2c_new_device(item->adapter, &item->brdinfo);
+ if (!item->client) {
+ dev_err(dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ item->brdinfo.type, item->bus, item->brdinfo.addr);
+ i2c_put_adapter(item->adapter);
+ item->adapter = NULL;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void mlxcpld_hotplug_device_destroy(struct mlxcpld_hotplug_device *item)
+{
+ if (item->client) {
+ i2c_unregister_device(item->client);
+ item->client = NULL;
+ }
+
+ if (item->adapter) {
+ i2c_put_adapter(item->adapter);
+ item->adapter = NULL;
+ }
+}
+
+static inline void
+mlxcpld_hotplug_work_helper(struct device *dev,
+ struct mlxcpld_hotplug_device *item, u8 is_inverse,
+ u16 offset, u8 mask, u8 *cache)
+{
+ u8 val, asserted;
+ int bit;
+
+ /* Mask event. */
+ outb(0, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Read status. */
+ val = inb(offset) & mask;
+ asserted = *cache ^ val;
+ *cache = val;
+
+ /*
+ * Validate that the item related to the received signal type is
+ * valid. An invalid item should never happen, except when some
+ * piece of hardware is broken. In that case just produce an error
+ * message and return. The caller must continue to handle signals
+ * from other devices, if any.
+ */
+ if (unlikely(!item)) {
+ dev_err(dev, "False signal is received: register at offset 0x%02x, mask 0x%02x.\n",
+ offset, mask);
+ return;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
+ if (val & BIT(bit)) {
+ if (is_inverse)
+ mlxcpld_hotplug_device_destroy(item + bit);
+ else
+ mlxcpld_hotplug_device_create(dev, item + bit);
+ } else {
+ if (is_inverse)
+ mlxcpld_hotplug_device_create(dev, item + bit);
+ else
+ mlxcpld_hotplug_device_destroy(item + bit);
+ }
+ }
+
+ /* Acknowledge event. */
+ outb(0, offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Unmask event. */
+ outb(mask, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+}
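As a worked example of the cache/XOR logic above: with the PSU cache initialized to the full mask 0x03 (see mlxcpld_hotplug_set_irq() below) and a new status read of 0x01, asserted = 0x03 ^ 0x01 = 0x02, so only bit 1 is walked; that bit is now clear and PSU handling is inverse, so mlxcpld_hotplug_device_create() is called for the second PSU entry, while PSU 1 (whose bit did not change) is left untouched.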
+
+/*
+ * mlxcpld_hotplug_work_handler - traverses the CPLD interrupt
+ * registers according to the hierarchy schema below:
+ *
+ * Aggregation registers (status/mask)
+ * PSU registers: *---*
+ * *-----------------* | |
+ * |status/event/mask|----->| * |
+ * *-----------------* | |
+ * Power registers: | |
+ * *-----------------* | |
+ * |status/event/mask|----->| * |---> CPU
+ * *-----------------* | |
+ * FAN registers:
+ * *-----------------* | |
+ * |status/event/mask|----->| * |
+ * *-----------------* | |
+ * *---*
+ * In case some system change is detected (FAN in/out, PSU in/out, power
+ * cable attached/detached), the relevant device is created or destroyed.
+ */
+static void mlxcpld_hotplug_work_handler(struct work_struct *work)
+{
+ struct mlxcpld_hotplug_priv_data *priv = container_of(work,
+ struct mlxcpld_hotplug_priv_data, dwork.work);
+ u8 val, aggr_asserted;
+ unsigned long flags;
+
+ /* Mask aggregation event. */
+ outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+ /* Read aggregation status. */
+ val = inb(priv->plat->top_aggr_offset) & priv->plat->top_aggr_mask;
+ aggr_asserted = priv->aggr_cache ^ val;
+ priv->aggr_cache = val;
+
+ /* Handle PSU configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_psu_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->psu,
+ 1, priv->plat->psu_reg_offset,
+ priv->plat->psu_mask,
+ &priv->psu_cache);
+
+ /* Handle power cable configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_pwr_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->pwr,
+ 0, priv->plat->pwr_reg_offset,
+ priv->plat->pwr_mask,
+ &priv->pwr_cache);
+
+ /* Handle FAN configuration changes. */
+ if (aggr_asserted & priv->plat->top_aggr_fan_mask)
+ mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->fan,
+ 1, priv->plat->fan_reg_offset,
+ priv->plat->fan_mask,
+ &priv->fan_cache);
+
+ if (aggr_asserted) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * It is possible that some signals have been asserted while the
+ * interrupt was masked by mlxcpld_hotplug_work_handler. Such
+ * signals would otherwise be missed, so the delayed work is
+ * canceled and the work task is re-scheduled for immediate
+ * execution, which allows any missed signals to be handled.
+ * Otherwise the work handler simply confirms that no new
+ * signals have been received while the interrupt was masked.
+ */
+ cancel_delayed_work(&priv->dwork);
+ schedule_delayed_work(&priv->dwork, 0);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return;
+ }
+
+ /* Unmask aggregation event (no acknowledge needed). */
+ outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+ MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+}
+
+static void mlxcpld_hotplug_set_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+ /* Clear psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Set psu initial status as mask and unmask psu event. */
+ priv->psu_cache = priv->plat->psu_mask;
+ outb(priv->plat->psu_mask, priv->plat->psu_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Clear power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Keep power initial status as zero and unmask power event. */
+ outb(priv->plat->pwr_mask, priv->plat->pwr_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Clear fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+ /* Set fan initial status as mask and unmask fan event. */
+ priv->fan_cache = priv->plat->fan_mask;
+ outb(priv->plat->fan_mask, priv->plat->fan_reg_offset +
+ MLXCPLD_HOTPLUG_MASK_OFF);
+
+ /* Keep aggregation initial status as zero and unmask events. */
+ outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+ MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+ /* Invoke the work handler to perform initial hotplug device setup. */
+ mlxcpld_hotplug_work_handler(&priv->dwork.work);
+
+ enable_irq(priv->irq);
+}
+
+static void mlxcpld_hotplug_unset_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+ int i;
+
+ disable_irq(priv->irq);
+ cancel_delayed_work_sync(&priv->dwork);
+
+ /* Mask aggregation event. */
+ outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+ /* Mask psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear psu presence event. */
+ outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Mask power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear power cable event. */
+ outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Mask fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+ /* Clear fan presence event. */
+ outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+ /* Remove all the attached devices. */
+ for (i = 0; i < priv->plat->psu_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->psu + i);
+
+ for (i = 0; i < priv->plat->pwr_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->pwr + i);
+
+ for (i = 0; i < priv->plat->fan_count; i++)
+ mlxcpld_hotplug_device_destroy(priv->plat->fan + i);
+}
+
+static irqreturn_t mlxcpld_hotplug_irq_handler(int irq, void *dev)
+{
+ struct mlxcpld_hotplug_priv_data *priv =
+ (struct mlxcpld_hotplug_priv_data *)dev;
+
+ /* Schedule work task for immediate execution. */
+ schedule_delayed_work(&priv->dwork, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int mlxcpld_hotplug_probe(struct platform_device *pdev)
+{
+ struct mlxcpld_hotplug_platform_data *pdata;
+ struct mlxcpld_hotplug_priv_data *priv;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ priv->plat = pdata;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
+ priv->irq);
+ return priv->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, priv->irq,
+ mlxcpld_hotplug_irq_handler, 0, pdev->name,
+ priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
+ return err;
+ }
+ disable_irq(priv->irq);
+
+ INIT_DELAYED_WORK(&priv->dwork, mlxcpld_hotplug_work_handler);
+ spin_lock_init(&priv->lock);
+
+ err = mlxcpld_hotplug_attr_init(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate attributes: %d\n", err);
+ return err;
+ }
+
+ priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "mlxcpld_hotplug", priv, priv->groups);
+ if (IS_ERR(priv->hwmon)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+ PTR_ERR(priv->hwmon));
+ return PTR_ERR(priv->hwmon);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Perform initial interrupt setup. */
+ mlxcpld_hotplug_set_irq(priv);
+
+ return 0;
+}
+
+static int mlxcpld_hotplug_remove(struct platform_device *pdev)
+{
+ struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+
+ /* Clean up interrupt setup. */
+ mlxcpld_hotplug_unset_irq(priv);
+
+ return 0;
+}
+
+static struct platform_driver mlxcpld_hotplug_driver = {
+ .driver = {
+ .name = "mlxcpld-hotplug",
+ },
+ .probe = mlxcpld_hotplug_probe,
+ .remove = mlxcpld_hotplug_remove,
+};
+
+module_platform_driver(mlxcpld_hotplug_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox CPLD hotplug platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxcpld-hotplug");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 978e6d640572..9a32f8627ecc 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -283,7 +283,7 @@ static int __init msi_wmi_input_setup(void)
if (err)
goto err_free_keymap;
- last_pressed = ktime_set(0, 0);
+ last_pressed = 0;
return 0;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 3f870972247c..59b8eb626dcc 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -458,7 +458,7 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
NULL, &result);
- if (!ACPI_SUCCESS(rc)) {
+ if (ACPI_FAILURE(rc)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"error getting hotkey status\n"));
return;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index c890a49587e4..aa2ee51d3547 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -68,7 +68,7 @@
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <acpi/video.h>
#define dprintk(fmt, ...) \
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
new file mode 100644
index 000000000000..cbf4d83a7271
--- /dev/null
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -0,0 +1,297 @@
+/*
+ * Driver for the LID cover switch of the Surface 3
+ *
+ * Copyright (c) 2016 Red Hat Inc.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("Surface 3 platform driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define SPI_CTL_OBJ_NAME "SPI"
+#define SPI_TS_OBJ_NAME "NTRG"
+
+#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE"
+
+MODULE_ALIAS("wmi:" SURFACE3_LID_GUID);
+
+static const struct dmi_system_id surface3_dmi_table[] = {
+#if defined(CONFIG_X86)
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+#endif
+ { }
+};
+
+struct surface3_wmi {
+ struct acpi_device *touchscreen_adev;
+ struct acpi_device *pnp0c0d_adev;
+ struct acpi_hotplug_context hp;
+ struct input_dev *input;
+};
+
+static struct platform_device *s3_wmi_pdev;
+
+static struct surface3_wmi s3_wmi;
+
+static DEFINE_MUTEX(s3_wmi_lock);
+
+static int s3_wmi_query_block(const char *guid, int instance, int *ret)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ int error = 0;
+
+ mutex_lock(&s3_wmi_lock);
+ status = wmi_query_block(guid, instance, &output);
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ pr_err("query block returned object type: %d - buffer length:%d\n",
+ obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ error = -EINVAL;
+ goto out_free_unlock;
+ }
+ *ret = obj->integer.value;
+ out_free_unlock:
+ kfree(obj);
+ mutex_unlock(&s3_wmi_lock);
+ return error;
+}
+
+static inline int s3_wmi_query_lid(int *ret)
+{
+ return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
+}
+
+static int s3_wmi_send_lid_state(void)
+{
+ int ret, lid_sw;
+
+ ret = s3_wmi_query_lid(&lid_sw);
+ if (ret)
+ return ret;
+
+ input_report_switch(s3_wmi.input, SW_LID, lid_sw);
+ input_sync(s3_wmi.input);
+
+ return 0;
+}
+
+static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
+{
+ return s3_wmi_send_lid_state();
+}
+
+static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
+ u32 level,
+ void *data,
+ void **return_value)
+{
+ struct acpi_device *adev, **ts_adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ ts_adev = data;
+
+ if (strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
+ strlen(SPI_TS_OBJ_NAME)))
+ return AE_OK;
+
+ if (*ts_adev) {
+ pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
+ return AE_OK;
+ }
+
+ *ts_adev = adev;
+
+ return AE_OK;
+}
+
+static int s3_wmi_check_platform_device(struct device *dev, void *data)
+{
+ struct acpi_device *adev, *ts_adev = NULL;
+ acpi_handle handle;
+ acpi_status status;
+
+ /* ignore non ACPI devices */
+ handle = ACPI_HANDLE(dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return 0;
+
+ /* check for LID ACPI switch */
+ if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
+ s3_wmi.pnp0c0d_adev = adev;
+ return 0;
+ }
+
+ /* ignore non SPI controllers */
+ if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
+ strlen(SPI_CTL_OBJ_NAME)))
+ return 0;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ s3_wmi_attach_spi_device, NULL,
+ &ts_adev, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(dev, "failed to enumerate SPI slaves\n");
+
+ if (!ts_adev)
+ return 0;
+
+ s3_wmi.touchscreen_adev = ts_adev;
+
+ return 0;
+}
+
+static int s3_wmi_create_and_register_input(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Lid Switch";
+ input->phys = "button/input0";
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x0005;
+
+ input_set_capability(input, EV_SW, SW_LID);
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ s3_wmi.input = input;
+
+ return 0;
+}
+
+static int __init s3_wmi_probe(struct platform_device *pdev)
+{
+ int error;
+
+ if (!dmi_check_system(surface3_dmi_table))
+ return -ENODEV;
+
+ memset(&s3_wmi, 0, sizeof(s3_wmi));
+
+ bus_for_each_dev(&platform_bus_type, NULL, NULL,
+ s3_wmi_check_platform_device);
+
+ if (!s3_wmi.touchscreen_adev)
+ return -ENODEV;
+
+ acpi_bus_trim(s3_wmi.pnp0c0d_adev);
+
+ error = s3_wmi_create_and_register_input(pdev);
+ if (error)
+ goto restore_acpi_lid;
+
+ acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
+ s3_wmi_hp_notify, NULL);
+
+ s3_wmi_send_lid_state();
+
+ return 0;
+
+ restore_acpi_lid:
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return error;
+}
+
+static int s3_wmi_remove(struct platform_device *device)
+{
+ /* remove the hotplug context from the acpi device */
+ s3_wmi.touchscreen_adev->hp = NULL;
+
+ /* reinstall the actual PNP0C0D LID default handler */
+ acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3_wmi_resume(struct device *dev)
+{
+ s3_wmi_send_lid_state();
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
+
+static struct platform_driver s3_wmi_driver = {
+ .driver = {
+ .name = "surface3-wmi",
+ .pm = &s3_wmi_pm,
+ },
+ .remove = s3_wmi_remove,
+};
+
+static int __init s3_wmi_init(void)
+{
+ int error;
+
+ s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
+ if (!s3_wmi_pdev)
+ return -ENOMEM;
+
+ error = platform_device_add(s3_wmi_pdev);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
+ if (error)
+ goto err_device_del;
+
+ pr_info("Surface 3 WMI Extras loaded\n");
+ return 0;
+
+ err_device_del:
+ platform_device_del(s3_wmi_pdev);
+ err_device_put:
+ platform_device_put(s3_wmi_pdev);
+ return error;
+}
+
+static void __exit s3_wmi_exit(void)
+{
+ platform_device_unregister(s3_wmi_pdev);
+ platform_driver_unregister(&s3_wmi_driver);
+}
+
+module_init(s3_wmi_init);
+module_exit(s3_wmi_exit);
diff --git a/drivers/platform/x86/surface3_button.c b/drivers/platform/x86/surface3_button.c
new file mode 100644
index 000000000000..8bfd7f613d36
--- /dev/null
+++ b/drivers/platform/x86/surface3_button.c
@@ -0,0 +1,250 @@
+/*
+ * Support for the button array on the Surface tablets.
+ *
+ * (C) Copyright 2016 Red Hat, Inc
+ *
+ * Based on soc_button_array.c:
+ *
+ * {C} Copyright 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+
+#define SURFACE_BUTTON_OBJ_NAME "TEV2"
+#define MAX_NBUTTONS 4
+
+/*
+ * Some of the buttons like volume up/down are auto repeat, while others
+ * are not. To support both, we register two platform devices, and put
+ * buttons into them based on whether the key should be auto repeat.
+ */
+#define BUTTON_TYPES 2
+
+/*
+ * Power button, Home button and Volume button support is supposed to
+ * be covered by drivers/input/misc/soc_button_array.c, which is implemented
+ * according to the "Windows ACPI Design Guide for SoC Platforms".
+ * However, the Surface 3 does not seem to obey the spec; instead it uses
+ * the device TEV2 (MSHW0028) to declare the GPIOs. The GPIOs also differ
+ * slightly in that the Home button is active high.
+ * Compared to surfacepro3_button.c, which also handles MSHW0028, the
+ * Surface 3 is a reduced platform and thus uses GPIOs, not ACPI events.
+ * We choose an I2C driver here because we need to access the resources
+ * declared under the device node, while surfacepro3_button.c only needs
+ * the ACPI companion node.
+ */
+static const struct acpi_device_id surface3_acpi_match[] = {
+ { "MSHW0028", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, surface3_acpi_match);
+
+struct surface3_button_info {
+ const char *name;
+ int acpi_index;
+ unsigned int event_type;
+ unsigned int event_code;
+ bool autorepeat;
+ bool wakeup;
+ bool active_low;
+};
+
+struct surface3_button_data {
+ struct platform_device *children[BUTTON_TYPES];
+};
+
+/*
+ * Get the Nth GPIO number from the ACPI object.
+ */
+static int surface3_button_lookup_gpio(struct device *dev, int acpi_index)
+{
+ struct gpio_desc *desc;
+ int gpio;
+
+ desc = gpiod_get_index(dev, NULL, acpi_index, GPIOD_ASIS);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ gpio = desc_to_gpio(desc);
+
+ gpiod_put(desc);
+
+ return gpio;
+}
+
+static struct platform_device *
+surface3_button_device_create(struct i2c_client *client,
+ const struct surface3_button_info *button_info,
+ bool autorepeat)
+{
+ const struct surface3_button_info *info;
+ struct platform_device *pd;
+ struct gpio_keys_button *gpio_keys;
+ struct gpio_keys_platform_data *gpio_keys_pdata;
+ int n_buttons = 0;
+ int gpio;
+ int error;
+
+ gpio_keys_pdata = devm_kzalloc(&client->dev,
+ sizeof(*gpio_keys_pdata) +
+ sizeof(*gpio_keys) * MAX_NBUTTONS,
+ GFP_KERNEL);
+ if (!gpio_keys_pdata)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_keys = (void *)(gpio_keys_pdata + 1);
+
+ for (info = button_info; info->name; info++) {
+ if (info->autorepeat != autorepeat)
+ continue;
+
+ gpio = surface3_button_lookup_gpio(&client->dev,
+ info->acpi_index);
+ if (!gpio_is_valid(gpio))
+ continue;
+
+ gpio_keys[n_buttons].type = info->event_type;
+ gpio_keys[n_buttons].code = info->event_code;
+ gpio_keys[n_buttons].gpio = gpio;
+ gpio_keys[n_buttons].active_low = info->active_low;
+ gpio_keys[n_buttons].desc = info->name;
+ gpio_keys[n_buttons].wakeup = info->wakeup;
+ n_buttons++;
+ }
+
+ if (n_buttons == 0) {
+ error = -ENODEV;
+ goto err_free_mem;
+ }
+
+ gpio_keys_pdata->buttons = gpio_keys;
+ gpio_keys_pdata->nbuttons = n_buttons;
+ gpio_keys_pdata->rep = autorepeat;
+
+ pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO);
+ if (!pd) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ error = platform_device_add_data(pd, gpio_keys_pdata,
+ sizeof(*gpio_keys_pdata));
+ if (error)
+ goto err_free_pdev;
+
+ error = platform_device_add(pd);
+ if (error)
+ goto err_free_pdev;
+
+ return pd;
+
+err_free_pdev:
+ platform_device_put(pd);
+err_free_mem:
+ devm_kfree(&client->dev, gpio_keys_pdata);
+ return ERR_PTR(error);
+}
+
+static int surface3_button_remove(struct i2c_client *client)
+{
+ struct surface3_button_data *priv = i2c_get_clientdata(client);
+
+ int i;
+
+ for (i = 0; i < BUTTON_TYPES; i++)
+ if (priv->children[i])
+ platform_device_unregister(priv->children[i]);
+
+ return 0;
+}
+
+static struct surface3_button_info surface3_button_surface3[] = {
+ { "power", 0, EV_KEY, KEY_POWER, false, true, true },
+ { "home", 1, EV_KEY, KEY_LEFTMETA, false, true, false },
+ { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
+ { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
+ { }
+};
+
+static int surface3_button_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct surface3_button_data *priv;
+ struct platform_device *pd;
+ int i;
+ int error;
+
+ if (strncmp(acpi_device_bid(ACPI_COMPANION(&client->dev)),
+ SURFACE_BUTTON_OBJ_NAME,
+ strlen(SURFACE_BUTTON_OBJ_NAME)))
+ return -ENODEV;
+
+ if (gpiod_count(dev, KBUILD_MODNAME) <= 0) {
+ dev_dbg(dev, "no GPIO attached, ignoring...\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, priv);
+
+ for (i = 0; i < BUTTON_TYPES; i++) {
+ pd = surface3_button_device_create(client,
+ surface3_button_surface3,
+ i == 0);
+ if (IS_ERR(pd)) {
+ error = PTR_ERR(pd);
+ if (error != -ENODEV) {
+ surface3_button_remove(client);
+ return error;
+ }
+ continue;
+ }
+
+ priv->children[i] = pd;
+ }
+
+ if (!priv->children[0] && !priv->children[1])
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct i2c_device_id surface3_id[] = {
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, surface3_id);
+
+static struct i2c_driver surface3_driver = {
+ .probe = surface3_button_probe,
+ .remove = surface3_button_remove,
+ .id_table = surface3_id,
+ .driver = {
+ .name = "surface3",
+ .acpi_match_table = ACPI_PTR(surface3_acpi_match),
+ },
+};
+module_i2c_driver(surface3_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("surface3 button array driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index b65ce7519411..cacb43fb1df7 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -82,7 +82,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <acpi/video.h>
/* ThinkPad CMOS commands */
@@ -128,6 +128,7 @@ enum {
/* ACPI HIDs */
#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068"
#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068"
+#define TPACPI_ACPI_LENOVO_HKEY_V2_HID "LEN0268"
#define TPACPI_ACPI_EC_HID "PNP0C09"
/* Input IDs */
@@ -190,6 +191,9 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
+ TP_HKEY_EV_TABLET_CHANGED = 0x60c0, /* X1 Yoga (2016):
+ * enter/leave tablet mode
+ */
TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
@@ -302,7 +306,12 @@ static struct {
u32 hotkey:1;
u32 hotkey_mask:1;
u32 hotkey_wlsw:1;
- u32 hotkey_tablet:1;
+ enum {
+ TP_HOTKEY_TABLET_NONE = 0,
+ TP_HOTKEY_TABLET_USES_MHKG,
+ /* X1 Yoga 2016, seen on BIOS N1FET44W */
+ TP_HOTKEY_TABLET_USES_CMMD,
+ } hotkey_tablet;
u32 kbdlight:1;
u32 light:1;
u32 light_status:1;
@@ -2059,6 +2068,8 @@ static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
+/* ThinkPad X1 Yoga (2016) */
+#define TP_EC_CMMD_TABLET_MODE 0x6
static int hotkey_get_wlsw(void)
{
@@ -2083,10 +2094,23 @@ static int hotkey_get_tablet_mode(int *status)
{
int s;
- if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
- return -EIO;
+ switch (tp_features.hotkey_tablet) {
+ case TP_HOTKEY_TABLET_USES_MHKG:
+ if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
+ return -EIO;
+
+ *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+ break;
+ case TP_HOTKEY_TABLET_USES_CMMD:
+ if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+ return -EIO;
+
+ *status = (s == TP_EC_CMMD_TABLET_MODE);
+ break;
+ default:
+ break;
+ }
- *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
return 0;
}
@@ -3117,6 +3141,37 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
typedef u16 tpacpi_keymap_entry_t;
typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+static int hotkey_init_tablet_mode(void)
+{
+ int in_tablet_mode = 0, res;
+ char *type = NULL;
+
+ if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+ /* For X41t, X60t, X61t Tablets... */
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
+ in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
+ type = "MHKG";
+ } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
+ /* For X1 Yoga (2016) */
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
+ in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
+ type = "CMMD";
+ }
+
+ if (!tp_features.hotkey_tablet)
+ return 0;
+
+ pr_info("Tablet mode switch found (type: %s), currently in %s mode\n",
+ type, in_tablet_mode ? "tablet" : "laptop");
+
+ res = add_to_attr_set(hotkey_dev_attributes,
+ &dev_attr_hotkey_tablet_mode.attr);
+ if (res)
+ return -1;
+
+ return in_tablet_mode;
+}
+
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
/* Requirements for changing the default keymaps:
@@ -3464,21 +3519,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
res = add_to_attr_set(hotkey_dev_attributes,
&dev_attr_hotkey_radio_sw.attr);
- /* For X41t, X60t, X61t Tablets... */
- if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
- tp_features.hotkey_tablet = 1;
- tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
- pr_info("possible tablet mode switch found; "
- "ThinkPad in %s mode\n",
- (tabletsw_state) ? "tablet" : "laptop");
- res = add_to_attr_set(hotkey_dev_attributes,
- &dev_attr_hotkey_tablet_mode.attr);
- }
+ res = hotkey_init_tablet_mode();
+ if (res < 0)
+ goto err_exit;
- if (!res)
- res = register_attr_set_with_sysfs(
- hotkey_dev_attributes,
- &tpacpi_pdev->dev.kobj);
+ tabletsw_state = res;
+
+ res = register_attr_set_with_sysfs(hotkey_dev_attributes,
+ &tpacpi_pdev->dev.kobj);
if (res)
goto err_exit;
@@ -3899,6 +3947,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
*ignore_acpi_ev = true;
return true;
+ case TP_HKEY_EV_TABLET_CHANGED:
+ tpacpi_input_send_tabletsw();
+ hotkey_tablet_mode_notify_change();
+ *send_acpi_ev = false;
+ break;
+
default:
pr_warn("unknown possible thermal alarm or keyboard event received\n");
known = false;
@@ -4143,6 +4197,7 @@ errexit:
static const struct acpi_device_id ibm_htk_device_ids[] = {
{TPACPI_ACPI_IBM_HKEY_HID, 0},
{TPACPI_ACPI_LENOVO_HKEY_HID, 0},
+ {TPACPI_ACPI_LENOVO_HKEY_V2_HID, 0},
{"", 0},
};
@@ -7716,7 +7771,7 @@ static struct ibm_struct volume_driver_data = {
#define alsa_card NULL
-static void inline volume_alsa_notify_change(void)
+static inline void volume_alsa_notify_change(void)
{
}
@@ -9018,7 +9073,7 @@ static int mute_led_on_off(struct tp_led_table *t, bool state)
acpi_handle temp;
int output;
- if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
return -EIO;
}
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 4b6808ff0e5d..5c5b3d47b5f6 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "base.h"
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index c212db0fc65d..5ee6b2a5f8d5 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -26,7 +26,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "pnpbios.h"
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f67b8da..abeb77217a21 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,16 @@ config POWER_RESET_MSM
help
Power off and restart support for Qualcomm boards.
+config POWER_RESET_PIIX4_POWEROFF
+ tristate "Intel PIIX4 power-off driver"
+ depends on PCI
+ depends on MIPS || COMPILE_TEST
+ help
+ This driver supports powering off a system using the Intel PIIX4
+ southbridge, for example the MIPS Malta development board. The
+ southbridge SOff state is entered in response to a request to
+ power off the system.
+
config POWER_RESET_LTC2952
bool "LTC2952 PowerPath power-off driver"
depends on OF_GPIO
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c7fc25..11dae3b56ff9 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df35f26..a85dd4d233af 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9x5-shdwc", },
{ /*sentinel*/ }
};
+MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
.remove = __exit_p(at91_poweroff_remove),
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 1b5d450586d1..568580cf0655 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = {
{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, at91_reset_of_match);
static struct notifier_block at91_restart_nb = {
.priority = 192,
@@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = {
{ "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
static struct platform_driver at91_reset_driver = {
.remove = __exit_p(at91_reset_remove),
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 15fed9d8f871..bfcd6fba6363 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -169,7 +169,7 @@ static void ltc2952_poweroff_kill(void)
static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
{
- data->wde_interval = ktime_set(0, 300L*1E6L);
+ data->wde_interval = 300L * 1E6L;
data->trigger_delay = ktime_set(2, 500L*1E6L);
hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
new file mode 100644
index 000000000000..bacfc95783f0
--- /dev/null
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+static struct pci_dev *pm_dev;
+static resource_size_t io_offset;
+
+enum piix4_pm_io_reg {
+ PIIX4_FUNC3IO_PMSTS = 0x00,
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS BIT(8)
+ PIIX4_FUNC3IO_PMCNTRL = 0x04,
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN BIT(13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+};
+
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
+static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES;
+
+static void piix4_poweroff(void)
+{
+ int spec_devid;
+ u16 sts;
+
+ /* Ensure the power button status is clear */
+ while (1) {
+ sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS);
+ if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
+ break;
+ outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS);
+ }
+
+ /* Enable entry to suspend */
+ outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
+ io_offset + PIIX4_FUNC3IO_PMCNTRL);
+
+ /* If the special cycle occurs too soon this doesn't work... */
+ mdelay(10);
+
+ /*
+ * The PIIX4 will enter the suspend state only after seeing a special
+ * cycle with the correct magic data on the PCI bus. Generate that
+ * cycle now.
+ */
+ spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
+ pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0,
+ PIIX4_SUSPEND_MAGIC);
+
+ /* Give the system some time to power down, then error */
+ mdelay(1000);
+ pr_emerg("Unable to poweroff system\n");
+}
+
+static int piix4_poweroff_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int res;
+
+ if (pm_dev)
+ return -EINVAL;
+
+ /* Request access to the PIIX4 PM IO registers */
+ res = pci_request_region(dev, piix4_pm_io_region,
+ "PIIX4 PM IO registers");
+ if (res) {
+ dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
+ res);
+ return res;
+ }
+
+ pm_dev = dev;
+ io_offset = pci_resource_start(dev, piix4_pm_io_region);
+ pm_power_off = piix4_poweroff;
+
+ return 0;
+}
+
+static void piix4_poweroff_remove(struct pci_dev *dev)
+{
+ if (pm_power_off == piix4_poweroff)
+ pm_power_off = NULL;
+
+ pci_release_region(dev, piix4_pm_io_region);
+ pm_dev = NULL;
+}
+
+static const struct pci_device_id piix4_poweroff_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
+ { 0 },
+};
+
+static struct pci_driver piix4_poweroff_driver = {
+ .name = "piix4-poweroff",
+ .id_table = piix4_poweroff_ids,
+ .probe = piix4_poweroff_probe,
+ .remove = piix4_poweroff_remove,
+};
+
+module_pci_driver(piix4_poweroff_driver);
+MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 1ecb51d67149..c8c371b285b1 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
{ .compatible = "syscon-reboot-mode" },
{}
};
+MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match);
static struct platform_driver syscon_reboot_mode_driver = {
.probe = syscon_reboot_mode_probe,
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index b0b1eb3a78c2..7549c7f74a3c 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = {
{ .compatible = "zte,sysctrl" },
{}
};
+MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
static struct platform_driver zx_reboot_driver = {
.probe = zx_reboot_probe,
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f673118c..c569f82a0071 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
* ab8500_fg_battok_calc - calculate the bit pattern corresponding
* to the target voltage.
* @di: pointer to the ab8500_fg structure
- * @target target voltage
+ * @target: target voltage
*
* Returns bit pattern closest to the target voltage
* valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
@@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy)
}
/**
- * abab8500_fg_reinit_work() - work to reset the FG algorithm
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
* @work: pointer to the work_struct structure
*
* Used to reset the current battery capacity to be able to
@@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = {
};
/**
- * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function removes the entry in sysfs.
@@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
}
/**
- * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * ab8500_fg_sysfs_init() - init of sysfs entry
* @di: pointer to the struct ab8500_chargalg
*
* This function adds an entry in sysfs.
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde692f724..539eb41504bb 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = {
{ .name = DEV_NAME },
{},
};
+MODULE_DEVICE_TABLE(platform, axp288_fg_id_table);
static int axp288_fuel_gauge_remove(struct platform_device *pdev)
{
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9f4e83..e9584330aeed 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
- pm_runtime_put_sync(bdi->dev);
+ pm_runtime_get_sync(bdi->dev);
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 3b0dbc689d72..08c36b8e04bd 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
},
+ [BQ27510] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x1e,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x20,
+ [BQ27XXX_REG_DCAP] = 0x2e,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
[BQ27530] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27510_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
static enum power_supply_property bq27530_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -385,6 +422,7 @@ static struct {
BQ27XXX_PROP(BQ27000, bq27000_battery_props),
BQ27XXX_PROP(BQ27010, bq27010_battery_props),
BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+ BQ27XXX_PROP(BQ27510, bq27510_battery_props),
BQ27XXX_PROP(BQ27530, bq27530_battery_props),
BQ27XXX_PROP(BQ27541, bq27541_battery_props),
BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices);
static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
{
struct bq27xxx_device_info *di;
+ unsigned int prev_val = *(unsigned int *) kp->arg;
int ret;
ret = param_set_uint(val, kp);
- if (ret < 0)
+ if (ret < 0 || prev_val == *(unsigned int *) kp->arg)
return ret;
mutex_lock(&bq27xxx_list_lock);
@@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
{
- if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+ if (di->chip == BQ27500 || di->chip == BQ27510 ||
+ di->chip == BQ27541 || di->chip == BQ27545)
return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
if (di->chip == BQ27530 || di->chip == BQ27421)
return flags & BQ27XXX_FLAG_OT;
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2a9c20..5c5c3a6f9923 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27200", BQ27000 },
{ "bq27210", BQ27010 },
{ "bq27500", BQ27500 },
- { "bq27510", BQ27500 },
- { "bq27520", BQ27500 },
+ { "bq27510", BQ27510 },
+ { "bq27520", BQ27510 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27530 },
{ "bq27541", BQ27541 },
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 4af7b770f293..2fa6edd6e8b1 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
-MODULE_ALIAS("platform:battery-ipaq-micro");
+MODULE_ALIAS("platform:ipaq-micro-battery");
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b727d484..509e2b341bd6 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -384,9 +384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
- if (!param)
- continue;
-
if (lp8788_is_valid_charger_register(param->addr)) {
ret = lp8788_write_byte(lp, param->addr, param->val);
if (ret)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80202b5..e7c3649b31a0 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -21,18 +21,13 @@
#include <linux/max17040_battery.h>
#include <linux/slab.h>
-#define MAX17040_VCELL_MSB 0x02
-#define MAX17040_VCELL_LSB 0x03
-#define MAX17040_SOC_MSB 0x04
-#define MAX17040_SOC_LSB 0x05
-#define MAX17040_MODE_MSB 0x06
-#define MAX17040_MODE_LSB 0x07
-#define MAX17040_VER_MSB 0x08
-#define MAX17040_VER_LSB 0x09
-#define MAX17040_RCOMP_MSB 0x0C
-#define MAX17040_RCOMP_LSB 0x0D
-#define MAX17040_CMD_MSB 0xFE
-#define MAX17040_CMD_LSB 0xFF
+#define MAX17040_VCELL 0x02
+#define MAX17040_SOC 0x04
+#define MAX17040_MODE 0x06
+#define MAX17040_VER 0x08
+#define MAX17040_RCOMP 0x0C
+#define MAX17040_CMD 0xFE
+
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
@@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy,
return 0;
}
-static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
{
int ret;
- ret = i2c_smbus_write_byte_data(client, reg, value);
+ ret = i2c_smbus_write_word_swapped(client, reg, value);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(client, reg);
+ ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0)
dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
static void max17040_reset(struct i2c_client *client)
{
- max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
- max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+ max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 vcell;
- msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
- lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+ vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = (msb << 4) + (lsb >> 4);
+ chip->vcell = vcell;
}
static void max17040_get_soc(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- u8 msb;
- u8 lsb;
+ u16 soc;
- msb = max17040_read_reg(client, MAX17040_SOC_MSB);
- lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+ soc = max17040_read_reg(client, MAX17040_SOC);
- chip->soc = msb;
+ chip->soc = (soc >> 8);
}
static void max17040_get_version(struct i2c_client *client)
{
- u8 msb;
- u8 lsb;
+ u16 version;
- msb = max17040_read_reg(client, MAX17040_VER_MSB);
- lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+ version = max17040_read_reg(client, MAX17040_VER);
- dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
}
static void max17040_get_online(struct i2c_client *client)
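The register rework above treats every MAX17040 quantity as one big-endian 16-bit register, so a single swapped word transfer replaces the old MSB/LSB byte pair. A minimal sketch of the resulting read path (illustrative only; max17040_read_soc_percent is not a function in this driver):

	static int max17040_read_soc_percent(struct i2c_client *client)
	{
		/* one 16-bit byte-swapped SMBus read instead of two byte reads */
		int soc = i2c_smbus_read_word_swapped(client, MAX17040_SOC);

		if (soc < 0)
			return soc;	/* propagate the I2C error code */

		return soc >> 8;	/* integer percent lives in the high byte */
	}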
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab571528..290ddc12b040 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
{ }
};
+MODULE_DEVICE_TABLE(platform, max8997_battery_id);
static struct platform_driver max8997_battery_driver = {
.driver = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca383a1..1e0960b646e8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
/**
* power_supply_get_by_phandle() - Search for a power supply and returns its ref
* @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a power supply name
+ * @property: Name of property holding a power supply name
*
* If power supply was found, it increases reference count for the
* internal power supply's device. The user should power_supply_put()
@@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res)
* devm_power_supply_get_by_phandle() - Resource managed version of
* power_supply_get_by_phandle()
* @dev: Pointer to device holding phandle property
- * @phandle_name: Name of property holding a power supply phandle
+ * @property: Name of property holding a power supply phandle
*
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c5880664e09..a2740cf57ad3 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev,
return sprintf(buf, "%s\n", charge);
}
-static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
+static DEVICE_ATTR_RO(charger_state);
static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 6285626d142a..e3edb31ac880 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -30,8 +30,7 @@ static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->batt_aux) * pdata->batt_mult /
@@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
pdata->temp_aux) * pdata->temp_mult /
@@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
- struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
mutex_lock(&work_lock);
@@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
int ret = 0;
int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
int i = 0;
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
+ struct power_supply_config cfg = {};
- if (!wmdata) {
+ if (!pdata) {
dev_err(&dev->dev, "No platform data supplied\n");
return -EINVAL;
}
- pdata = wmdata->batt_pdata;
+ cfg.drv_data = pdata;
if (dev->id != -1)
return -EINVAL;
@@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
bat_psy_desc.properties = prop;
bat_psy_desc.num_properties = props;
- bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL);
+ bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg);
if (!IS_ERR(bat_psy)) {
schedule_work(&bat_work);
} else {
@@ -266,8 +262,7 @@ err:
static int wm97xx_bat_remove(struct platform_device *dev)
{
- struct wm97xx_pdata *wmdata = dev->dev.platform_data;
- struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+ struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
if (pdata && gpio_is_valid(pdata->charge_gpio)) {
free_irq(gpio_to_irq(pdata->charge_gpio), dev);
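The conversion in this file follows the usual power-supply drvdata pattern: hand the platform data to the core via power_supply_config.drv_data at registration, then recover it in each callback with power_supply_get_drvdata() instead of reaching through dev.parent->platform_data. A condensed sketch (hypothetical function names, assuming <linux/power_supply.h>):

	static int example_probe(struct platform_device *pdev,
				 const struct power_supply_desc *desc)
	{
		struct power_supply_config cfg = {};
		struct power_supply *psy;

		/* hand the battery platform data to the power-supply core */
		cfg.drv_data = pdev->dev.platform_data;
		psy = power_supply_register(&pdev->dev, desc, &cfg);

		return PTR_ERR_OR_ZERO(psy);
	}

	static int example_read_aux(struct power_supply *psy)
	{
		struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(psy);

		return pdata->batt_aux;	/* no detour through dev.parent */
	}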
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index bf0128899c09..f92dd41b0395 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -175,6 +175,15 @@ config PWM_FSL_FTM
To compile this driver as a module, choose M here: the module
will be called pwm-fsl-ftm.
+config PWM_HIBVT
+ tristate "HiSilicon BVT PWM support"
+ depends on ARCH_HISI || COMPILE_TEST
+ help
+ Generic PWM framework driver for HiSilicon BVT SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-hibvt.
+
config PWM_IMG
tristate "Imagination Technologies PWM driver"
depends on HAS_IOMEM
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 1194c54efcc2..a48bdb517792 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX) += pwm-imx.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
new file mode 100644
index 000000000000..d0e8f8542626
--- /dev/null
+++ b/drivers/pwm/pwm-hibvt.c
@@ -0,0 +1,271 @@
+/*
+ * PWM Controller Driver for HiSilicon BVT SoCs
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+
+#define PWM_CFG0_ADDR(x) (((x) * 0x20) + 0x0)
+#define PWM_CFG1_ADDR(x) (((x) * 0x20) + 0x4)
+#define PWM_CFG2_ADDR(x) (((x) * 0x20) + 0x8)
+#define PWM_CTRL_ADDR(x) (((x) * 0x20) + 0xC)
+
+#define PWM_ENABLE_SHIFT 0
+#define PWM_ENABLE_MASK BIT(0)
+
+#define PWM_POLARITY_SHIFT 1
+#define PWM_POLARITY_MASK BIT(1)
+
+#define PWM_KEEP_SHIFT 2
+#define PWM_KEEP_MASK BIT(2)
+
+#define PWM_PERIOD_MASK GENMASK(31, 0)
+#define PWM_DUTY_MASK GENMASK(31, 0)
+
+struct hibvt_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+ struct reset_control *rstc;
+};
+
+struct hibvt_pwm_soc {
+ u32 num_pwms;
+};
+
+static const struct hibvt_pwm_soc pwm_soc[2] = {
+ { .num_pwms = 4 },
+ { .num_pwms = 8 },
+};
+
+static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct hibvt_pwm_chip, chip);
+}
+
+static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
+ u32 mask, u32 data)
+{
+ void __iomem *address = base + offset;
+ u32 value;
+
+ value = readl(address);
+ value &= ~mask;
+ value |= (data & mask);
+ writel(value, address);
+}
+
+static void hibvt_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_ENABLE_MASK, 0x1);
+}
+
+static void hibvt_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_ENABLE_MASK, 0x0);
+}
+
+static void hibvt_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_cycle_ns, int period_ns)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+ u32 freq, period, duty;
+
+ freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+
+ period = div_u64(freq * period_ns, 1000);
+ duty = div_u64(period * duty_cycle_ns, period_ns);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG0_ADDR(pwm->hwpwm),
+ PWM_PERIOD_MASK, period);
+
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG1_ADDR(pwm->hwpwm),
+ PWM_DUTY_MASK, duty);
+}
+
+static void hibvt_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT));
+ else
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+ PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT));
+}
+
+static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+ void __iomem *base;
+ u32 freq, value;
+
+ freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+ base = hi_pwm_chip->base;
+
+ value = readl(base + PWM_CFG0_ADDR(pwm->hwpwm));
+ state->period = div_u64(value * 1000, freq);
+
+ value = readl(base + PWM_CFG1_ADDR(pwm->hwpwm));
+ state->duty_cycle = div_u64(value * 1000, freq);
+
+ value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
+ state->enabled = (PWM_ENABLE_MASK & value);
+}
+
+static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ if (state->polarity != pwm->state.polarity)
+ hibvt_pwm_set_polarity(chip, pwm, state->polarity);
+
+ if (state->period != pwm->state.period ||
+ state->duty_cycle != pwm->state.duty_cycle)
+ hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
+
+ if (state->enabled != pwm->state.enabled) {
+ if (state->enabled)
+ hibvt_pwm_enable(chip, pwm);
+ else
+ hibvt_pwm_disable(chip, pwm);
+ }
+
+ return 0;
+}
+
+static struct pwm_ops hibvt_pwm_ops = {
+ .get_state = hibvt_pwm_get_state,
+ .apply = hibvt_pwm_apply,
+
+ .owner = THIS_MODULE,
+};
+
+static int hibvt_pwm_probe(struct platform_device *pdev)
+{
+ const struct hibvt_pwm_soc *soc =
+ of_device_get_match_data(&pdev->dev);
+ struct hibvt_pwm_chip *pwm_chip;
+ struct resource *res;
+ int ret;
+ int i;
+
+ pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
+ if (pwm_chip == NULL)
+ return -ENOMEM;
+
+ pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm_chip->clk)) {
+ dev_err(&pdev->dev, "getting clock failed with %ld\n",
+ PTR_ERR(pwm_chip->clk));
+ return PTR_ERR(pwm_chip->clk);
+ }
+
+ pwm_chip->chip.ops = &hibvt_pwm_ops;
+ pwm_chip->chip.dev = &pdev->dev;
+ pwm_chip->chip.base = -1;
+ pwm_chip->chip.npwm = soc->num_pwms;
+ pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ pwm_chip->chip.of_pwm_n_cells = 3;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pwm_chip->base))
+ return PTR_ERR(pwm_chip->base);
+
+ ret = clk_prepare_enable(pwm_chip->clk);
+ if (ret < 0)
+ return ret;
+
+ pwm_chip->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm_chip->rstc)) {
+ clk_disable_unprepare(pwm_chip->clk);
+ return PTR_ERR(pwm_chip->rstc);
+ }
+
+ reset_control_assert(pwm_chip->rstc);
+ msleep(30);
+ reset_control_deassert(pwm_chip->rstc);
+
+ ret = pwmchip_add(&pwm_chip->chip);
+ if (ret < 0) {
+ clk_disable_unprepare(pwm_chip->clk);
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++) {
+ hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
+ PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
+ }
+
+ platform_set_drvdata(pdev, pwm_chip);
+
+ return 0;
+}
+
+static int hibvt_pwm_remove(struct platform_device *pdev)
+{
+ struct hibvt_pwm_chip *pwm_chip;
+
+ pwm_chip = platform_get_drvdata(pdev);
+
+ reset_control_assert(pwm_chip->rstc);
+ msleep(30);
+ reset_control_deassert(pwm_chip->rstc);
+
+ clk_disable_unprepare(pwm_chip->clk);
+
+ return pwmchip_remove(&pwm_chip->chip);
+}
+
+static const struct of_device_id hibvt_pwm_of_match[] = {
+ { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] },
+ { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
+
+static struct platform_driver hibvt_pwm_driver = {
+ .driver = {
+ .name = "hibvt-pwm",
+ .of_match_table = hibvt_pwm_of_match,
+ },
+ .probe = hibvt_pwm_probe,
+ .remove = hibvt_pwm_remove,
+};
+module_platform_driver(hibvt_pwm_driver);
+
+MODULE_AUTHOR("Jian Yuan");
+MODULE_DESCRIPTION("HiSilicon BVT SoCs PWM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 9d5bd7d5c610..045ef9fa6fe3 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -524,7 +524,6 @@ static struct platform_driver meson_pwm_driver = {
};
module_platform_driver(meson_pwm_driver);
-MODULE_ALIAS("platform:meson-pwm");
MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf299a51..fb44d5215e30 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int val;
int ret;
@@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
unsigned int reg = rdev->desc->vsel_reg;
unsigned old_sel;
@@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
- int id = rdev->desc->id - RK808_ID_DCDC1;
+ int id = rdev_get_id(rdev);
struct gpio_desc *gpio = pdata->dvs_gpio[id];
/* if there is no dvs1/2 pin, we don't need wait extra time here. */
@@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
- RK808_ID_DCDC1];
+ unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
switch (ramp_delay) {
case 1 ... 2000:
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index eb0f5b13841a..9aafbb03482d 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -22,6 +22,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -30,10 +31,11 @@
enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
DCDC5, DCDC6, LDO1, LS3 };
-#define TPS65218_REGULATOR(_name, _id, _type, _ops, _n, _vr, _vm, _er, _em, \
- _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
+#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
.name = _name, \
+ .of_match = _of, \
.id = _id, \
.ops = &_ops, \
.n_voltages = _n, \
@@ -54,14 +56,6 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
.bypass_mask = _sm, \
} \
-#define TPS65218_INFO(_id, _nm, _min, _max) \
- [_id] = { \
- .id = _id, \
- .name = _nm, \
- .min_uV = _min, \
- .max_uV = _max, \
- }
-
static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
@@ -77,36 +71,6 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
};
-static struct tps_info tps65218_pmic_regs[] = {
- TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
- TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
- TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
- TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
- TPS65218_INFO(DCDC5, "DCDC5", 1000000, 1000000),
- TPS65218_INFO(DCDC6, "DCDC6", 1800000, 1800000),
- TPS65218_INFO(LDO1, "LDO1", 900000, 3400000),
- TPS65218_INFO(LS3, "LS3", -1, -1),
-};
-
-#define TPS65218_OF_MATCH(comp, label) \
- { \
- .compatible = comp, \
- .data = &label, \
- }
-
-static const struct of_device_id tps65218_of_match[] = {
- TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
- TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
- TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
- TPS65218_OF_MATCH("ti,tps65218-ls3", tps65218_pmic_regs[LS3]),
- { }
-};
-MODULE_DEVICE_TABLE(of, tps65218_of_match);
-
static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
unsigned selector)
{
@@ -188,7 +152,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (rid == TPS65218_DCDC_3 && tps->rev == TPS65218_REV_2_1)
return 0;
- if (!tps->info[rid]->strobe) {
+ if (!tps->strobes[rid]) {
if (rid == TPS65218_DCDC_3)
tps->info[rid]->strobe = 3;
else
@@ -197,8 +161,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
return tps65218_set_bits(tps, dev->desc->bypass_reg,
dev->desc->bypass_mask,
- tps->info[rid]->strobe,
- TPS65218_PROTECT_L1);
+ tps->strobes[rid], TPS65218_PROTECT_L1);
}
/* Operations permitted on DCDC1, DCDC2 */
@@ -272,7 +235,7 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
unsigned int index;
struct tps65218 *tps = rdev_get_drvdata(dev);
- retval = tps65218_reg_read(tps, dev->desc->csel_reg, &index);
+ retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index);
if (retval < 0)
return retval;
@@ -300,104 +263,104 @@ static struct regulator_ops tps65218_dcdc56_pmic_ops = {
};
static const struct regulator_desc regulators[] = {
- TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, REGULATOR_VOLTAGE,
- tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC1,
+ TPS65218_REGULATOR("DCDC1", "regulator-dcdc1", TPS65218_DCDC_1,
+ REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC1,
TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
TPS65218_SEQ3_DC1_SEQ_MASK),
- TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, REGULATOR_VOLTAGE,
- tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC2,
+ TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2,
+ REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+ TPS65218_REG_CONTROL_DCDC2,
TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
2, 4000, 0, TPS65218_REG_SEQ3,
TPS65218_SEQ3_DC2_SEQ_MASK),
- TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_DCDC3,
TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
- TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 53,
+ TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53,
TPS65218_REG_CONTROL_DCDC4,
TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
- TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, REGULATOR_VOLTAGE,
- tps65218_dcdc56_pmic_ops, 1, -1, -1,
- TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, 0,
- NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
+ TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5,
+ REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+ -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0,
+ 0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
TPS65218_SEQ5_DC5_SEQ_MASK),
- TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, REGULATOR_VOLTAGE,
- tps65218_dcdc56_pmic_ops, 1, -1, -1,
- TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, 0,
- NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
+ TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6,
+ REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+ -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0,
+ 0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
TPS65218_SEQ5_DC6_SEQ_MASK),
- TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, REGULATOR_VOLTAGE,
- tps65218_ldo1_dcdc34_ops, 64,
+ TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1,
+ REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
TPS65218_SEQ6_LDO1_SEQ_MASK),
- TPS65218_REGULATOR("LS3", TPS65218_LS_3, REGULATOR_CURRENT,
- tps65218_ls3_ops, 0, 0, 0, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2,
- TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0),
+ TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
+ REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0,
+ TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
+ TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
+ NULL, 0, 0, 0, 0, 0),
};
static int tps65218_regulator_probe(struct platform_device *pdev)
{
struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
- struct regulator_init_data *init_data;
- const struct tps_info *template;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- int id, ret;
+ int i, ret;
unsigned int val;
- match = of_match_device(tps65218_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
- id = template->id;
- init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
- &regulators[id]);
-
- platform_set_drvdata(pdev, tps);
-
- tps->info[id] = &tps65218_pmic_regs[id];
config.dev = &pdev->dev;
- config.init_data = init_data;
+ config.dev->of_node = tps->dev->of_node;
config.driver_data = tps;
config.regmap = tps->regmap;
- config.of_node = pdev->dev.of_node;
- rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
- if (IS_ERR(rdev)) {
- dev_err(tps->dev, "failed to register %s regulator\n",
- pdev->name);
- return PTR_ERR(rdev);
- }
+ /* Allocate memory for strobes */
+ tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
+ TPS65218_NUM_REGULATOR, GFP_KERNEL);
- ret = tps65218_reg_read(tps, regulators[id].bypass_reg, &val);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
- tps->info[id]->strobe = val & regulators[id].bypass_mask;
+ ret = regmap_read(tps->regmap, regulators[i].bypass_reg, &val);
+ if (ret)
+ return ret;
+
+ tps->strobes[i] = val & regulators[i].bypass_mask;
+ }
return 0;
}
+static const struct platform_device_id tps65218_regulator_id_table[] = {
+ { "tps65218-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65218_regulator_id_table);
+
static struct platform_driver tps65218_regulator_driver = {
.driver = {
.name = "tps65218-pmic",
- .of_match_table = tps65218_of_match,
},
.probe = tps65218_regulator_probe,
+ .id_table = tps65218_regulator_id_table,
};
module_platform_driver(tps65218_regulator_driver);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9a507e77eced..90b05c72186c 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
goto unwind_vring_allocations;
}
- /* track the rvdevs list reference */
- kref_get(&rvdev->refcount);
-
list_add_tail(&rvdev->node, &rproc->rvdevs);
rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/*
* Create a copy of the resource table. When a virtio device starts
* and calls vring_new_virtqueue() the address of the allocated vring
- * will be stored in the table_ptr. Before the device is started,
- * table_ptr will be copied into device memory.
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
*/
- rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
- if (!rproc->table_ptr)
+ rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
goto clean_up;
+ rproc->table_ptr = rproc->cached_table;
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
}
/*
- * The starting device has been given the rproc->table_ptr as the
+ * The starting device has been given the rproc->cached_table as the
* resource table. The address of the vring along with the other
- * allocated resources (carveouts etc) is stored in table_ptr.
+ * allocated resources (carveouts etc) is stored in cached_table.
* In order to pass this information to the remote device we must copy
* this information to device memory. We also update the table_ptr so
* that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table)
- memcpy(loaded_table, rproc->table_ptr, tablesz);
+ if (loaded_table) {
+ memcpy(loaded_table, rproc->cached_table, tablesz);
+ rproc->table_ptr = loaded_table;
+ }
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
clean_up_resources:
rproc_resource_cleanup(rproc);
clean_up:
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
/* if in crash state, unlock crash handler */
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 06d9fa2f3bc0..172dc966a01f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -94,5 +94,6 @@ config RESET_ZYNQ
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+source "drivers/reset/tegra/Kconfig"
endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bbe7026617fc..13b346e03d84 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,6 +1,7 @@
obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index b8ae1dbd4c17..10368ed8fd13 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list);
* @refcnt: Number of gets of this reset_control
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
* @deassert_cnt: Number of times this reset line has been deasserted
+ * @triggered_count: Number of times this reset line has been reset. Currently
+ * only used for shared resets, which means that the value
+ * will be either 0 or 1.
*/
struct reset_control {
struct reset_controller_dev *rcdev;
@@ -40,6 +43,7 @@ struct reset_control {
unsigned int refcnt;
int shared;
atomic_t deassert_count;
+ atomic_t triggered_count;
};
/**
@@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*
- * Calling this on a shared reset controller is an error.
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance: for all but the first caller this is
+ * a no-op.
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset has been used.
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
- WARN_ON(rstc->shared))
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
return -EINVAL;
- if (rstc->rcdev->ops->reset)
- return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->reset)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ if (atomic_inc_return(&rstc->triggered_count) != 1)
+ return 0;
+ }
+
+ ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (rstc->shared && ret)
+ atomic_dec(&rstc->triggered_count);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
@@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*
* For shared reset controls a driver cannot expect the hw's registers and
* internal state to be reset, but must be prepared for this to happen.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_assert(struct reset_control *rstc)
{
@@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
return -EINVAL;
@@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
* @rstc: reset controller
*
* After calling this function, the reset is guaranteed to be deasserted.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_deassert(struct reset_control *rstc)
{
@@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (atomic_inc_return(&rstc->deassert_count) != 1)
return 0;
}
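From a consumer's perspective the new shared-reset semantics look like this (a minimal sketch with a hypothetical function name, not code from the patch): only the first reset_control_reset() call on a shared line pulses the hardware, later callers get a successful no-op, and mixing it with reset_control_assert()/deassert() on the same shared control is rejected with -EINVAL.

	static int example_trigger_shared_reset(struct device *dev)
	{
		struct reset_control *rstc;

		rstc = devm_reset_control_get_shared(dev, NULL);
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		/* first user performs the pulse, everyone else sees 0 */
		return reset_control_reset(rstc);
	}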
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 369f3917fd8e..371197bbd055 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -1,6 +1,8 @@
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
+ * Marvell Berlin reset driver
+ *
* Antoine Tenart <antoine.tenart@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
@@ -12,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
-MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
@@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = {
.of_match_table = berlin_reset_dt_match,
},
};
-module_platform_driver(berlin_reset_driver);
-
-MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Berlin reset driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin_reset_driver);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 54cca0055171..a62ad52e262b 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -218,39 +218,17 @@ dis_clk_reg:
return ret;
}
-static int lpc18xx_rgu_remove(struct platform_device *pdev)
-{
- struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&rc->restart_nb);
- if (ret)
- dev_warn(&pdev->dev, "failed to unregister restart handler\n");
-
- reset_controller_unregister(&rc->rcdev);
-
- clk_disable_unprepare(rc->clk_delay);
- clk_disable_unprepare(rc->clk_reg);
-
- return 0;
-}
-
static const struct of_device_id lpc18xx_rgu_match[] = {
{ .compatible = "nxp,lpc1850-rgu" },
{ }
};
-MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
static struct platform_driver lpc18xx_rgu_driver = {
.probe = lpc18xx_rgu_probe,
- .remove = lpc18xx_rgu_remove,
.driver = {
- .name = "lpc18xx-reset",
- .of_match_table = lpc18xx_rgu_match,
+ .name = "lpc18xx-reset",
+ .of_match_table = lpc18xx_rgu_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(lpc18xx_rgu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_rgu_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 944980572f79..0d9036dea010 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = {
static const struct of_device_id oxnas_reset_dt_ids[] = {
{ .compatible = "oxsemi,ox810se-reset", },
+ { .compatible = "oxsemi,ox820-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 78ebf8424375..43e4a9f39b9b 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -1,4 +1,6 @@
/*
+ * Socfpga Reset Controller Driver
+ *
* Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* based on
@@ -16,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = {
.of_match_table = socfpga_reset_dt_ids,
},
};
-module_platform_driver(socfpga_reset_driver);
-
-MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
-MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 3080190b3f90..b44f6b5f87b6 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -142,7 +142,6 @@ static const struct of_device_id sunxi_reset_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-clock-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
@@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = {
.of_match_table = sunxi_reset_dt_ids,
},
};
-module_platform_driver(sunxi_reset_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index 138f2f205662..87a4e355578f 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -3,6 +3,8 @@
*
* Xilinx Zynq Reset controller driver
*
+ * Author: Moritz Fischer <moritz.fischer@ettus.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
@@ -15,7 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = {
.of_match_table = zynq_reset_dt_ids,
},
};
-module_platform_driver(zynq_reset_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
-MODULE_DESCRIPTION("Zynq Reset Controller Driver");
+builtin_platform_driver(zynq_reset_driver);
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 613178553612..71592b5bfd14 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -3,14 +3,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
-config STIH415_RESET
- bool
- select STI_RESET_SYSCFG
-
-config STIH416_RESET
- bool
- select STI_RESET_SYSCFG
-
config STIH407_RESET
bool
select STI_RESET_SYSCFG
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index dc85dfbe56a9..f9d82411f29e 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
-obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
-obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
deleted file mode 100644
index 6f220cdbef46..000000000000
--- a/drivers/reset/sti/reset-stih415.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih415-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH415 Peripheral powerdown definitions.
- */
-static const char stih415_front[] = "st,stih415-front-syscfg";
-static const char stih415_rear[] = "st,stih415-rear-syscfg";
-static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
-static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
-
-#define STIH415_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
-
-#define STIH415_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
-
-#define STIH415_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
-
-#define STIH415_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
-
-#define STIH415_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
-
-#define STIH415_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
-
-#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */
-#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */
-#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-
-static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
- [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0),
- [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1),
- [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2),
- [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0),
- [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1),
- [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2),
- [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3),
- [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4),
- [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih415_softresets[] = {
- [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
- [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
- [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
- [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
- [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
- [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih415_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih415_powerdowns),
- .channels = stih415_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih415_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih415_softresets),
- .channels = stih415_softresets,
-};
-
-static const struct of_device_id stih415_reset_match[] = {
- { .compatible = "st,stih415-powerdown",
- .data = &stih415_powerdown_controller, },
- { .compatible = "st,stih415-softreset",
- .data = &stih415_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih415_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih415",
- .of_match_table = stih415_reset_match,
- },
-};
-
-static int __init stih415_reset_init(void)
-{
- return platform_driver_register(&stih415_reset_driver);
-}
-arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
deleted file mode 100644
index c581d606ef0f..000000000000
--- a/drivers/reset/sti/reset-stih416.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih416-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH416 Peripheral powerdown definitions.
- */
-static const char stih416_front[] = "st,stih416-front-syscfg";
-static const char stih416_rear[] = "st,stih416-rear-syscfg";
-static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
-static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
-static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
-
-#define STIH416_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
-
-#define STIH416_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
-
-#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */
-#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */
-#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */
-#define SYSCFG_7563 0x8cc /* MPE softresets 0 */
-#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */
-
-#define STIH416_SRST_CPU(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
-
-#define STIH416_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
-
-#define STIH416_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
-
-#define STIH416_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
-
-#define STIH416_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
-
-static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
- [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0),
- [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1),
- [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2),
- [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0),
- [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1),
- [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2),
- [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5),
- [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3),
- [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4),
- [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9),
- [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih416_softresets[] = {
- [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
- [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
- [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
- [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
- [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
- [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
- [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
- [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
- [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
- [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
- [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
- [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
- [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
- [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
- [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
- [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
- [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
- [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
- [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
- [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
- [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
- [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
- [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
- [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
- [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
- [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
- [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
- [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih416_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih416_powerdowns),
- .channels = stih416_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih416_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih416_softresets),
- .channels = stih416_softresets,
-};
-
-static const struct of_device_id stih416_reset_match[] = {
- { .compatible = "st,stih416-powerdown",
- .data = &stih416_powerdown_controller, },
- { .compatible = "st,stih416-softreset",
- .data = &stih416_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih416_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih416",
- .of_match_table = stih416_reset_match,
- },
-};
-
-static int __init stih416_reset_init(void)
-{
- return platform_driver_register(&stih416_reset_driver);
-}
-arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
new file mode 100644
index 000000000000..d2afa293df7d
--- /dev/null
+++ b/drivers/reset/tegra/Kconfig
@@ -0,0 +1,2 @@
+config RESET_TEGRA_BPMP
+ def_bool TEGRA_BPMP
diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile
new file mode 100644
index 000000000000..775243ab7383
--- /dev/null
+++ b/drivers/reset/tegra/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
new file mode 100644
index 000000000000..5daf2ee1a396
--- /dev/null
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/reset-controller.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
+{
+ return container_of(rstc, struct tegra_bpmp, rstc);
+}
+
+static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ enum mrq_reset_commands command,
+ unsigned int id)
+{
+ struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+ struct mrq_reset_request request;
+ struct tegra_bpmp_message msg;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = command;
+ request.reset_id = id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_RESET;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
+}
+
+static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
+}
+
+static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
+}
+
+static const struct reset_control_ops tegra_bpmp_reset_ops = {
+ .reset = tegra_bpmp_reset_module,
+ .assert = tegra_bpmp_reset_assert,
+ .deassert = tegra_bpmp_reset_deassert,
+};
+
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ bpmp->rstc.ops = &tegra_bpmp_reset_ops;
+ bpmp->rstc.owner = THIS_MODULE;
+ bpmp->rstc.of_node = bpmp->dev->of_node;
+ bpmp->rstc.nr_resets = bpmp->soc->num_resets;
+
+ return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
+}
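
For reference, a minimal consumer-side sketch (not part of this patch; the device and the NULL reset name are illustrative) of how a driver behind a "resets" phandle would exercise the controller registered above through the generic reset API:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_hw_reset(struct device *dev)
{
	struct reset_control *rst;
	int err;

	/* resolved from the consumer's "resets" DT property */
	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* each call below ends up in tegra_bpmp_reset_common() */
	err = reset_control_assert(rst);
	if (err)
		return err;

	usleep_range(10, 20);

	return reset_control_deassert(rst);
}

A pulsed reset via reset_control_reset() would reach the .reset (CMD_RESET_MODULE) callback in the same way.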
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index a79cb5a9e5f2..1cfb775e8e82 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
struct device *dev = &rpdev->dev;
int ret;
- dev_set_name(&rpdev->dev, "%s:%s",
- dev_name(dev->parent), rpdev->id.name);
+ dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+ rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
rpdev->dev.release = rpmsg_release_device;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e859d148aba9..c93c5a8fba32 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -303,7 +303,7 @@ config RTC_DRV_MAX6900
config RTC_DRV_MAX8907
tristate "Maxim MAX8907"
- depends on MFD_MAX8907
+ depends on MFD_MAX8907 || COMPILE_TEST
help
If you say yes here you will get support for the
RTC of Maxim MAX8907 PMIC.
@@ -343,7 +343,7 @@ config RTC_DRV_MAX8997
config RTC_DRV_MAX77686
tristate "Maxim MAX77686"
- depends on MFD_MAX77686 || MFD_MAX77620
+ depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
help
If you say yes here you will get support for the
RTC of Maxim MAX77686/MAX77620/MAX77802 PMIC.
@@ -481,6 +481,7 @@ config RTC_DRV_TWL92330
config RTC_DRV_TWL4030
tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0"
depends on TWL4030_CORE
+ depends on OF
help
If you say yes here you get support for the RTC on the
TWL4030/TWL5030/TWL6030 family chips, used mostly with OMAP3 platforms.
@@ -602,7 +603,8 @@ config RTC_DRV_RV8803
config RTC_DRV_S5M
tristate "Samsung S2M/S5M series"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
+ select REGMAP_IRQ
help
If you say yes here you will get support for the
RTC of Samsung S2MPS14 and S5M PMIC series.
@@ -820,8 +822,8 @@ config RTC_DRV_RV3029_HWMON
comment "Platform RTC drivers"
-# this 'CMOS' RTC driver is arch dependent because <asm-generic/rtc.h>
-# requires <asm/mc146818rtc.h> defining CMOS_READ/CMOS_WRITE, and a
+# this 'CMOS' RTC driver is arch dependent because it requires
+# <asm/mc146818rtc.h> defining CMOS_READ/CMOS_WRITE, and a
# global rtc_lock ... it's not yet just another platform_device.
config RTC_DRV_CMOS
@@ -1549,14 +1551,11 @@ config RTC_DRV_MPC5121
will be called rtc-mpc5121.
config RTC_DRV_JZ4740
- tristate "Ingenic JZ4740 SoC"
- depends on MACH_JZ4740 || COMPILE_TEST
+ bool "Ingenic JZ4740 SoC"
+ depends on MACH_INGENIC || COMPILE_TEST
help
- If you say yes here you get support for the Ingenic JZ4740 SoC RTC
- controller.
-
- This driver can also be buillt as a module. If so, the module
- will be called rtc-jz4740.
+ If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
+ controllers.
config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
@@ -1567,7 +1566,7 @@ config RTC_DRV_LPC24XX
NXP LPC178x/18xx/408x/43xx devices.
If you have one of the devices above enable this driver to use
- the hardware RTC. This driver can also be buillt as a module. If
+ the hardware RTC. This driver can also be built as a module. If
so, the module will be called rtc-lpc24xx.
config RTC_DRV_LPC32XX
@@ -1576,7 +1575,7 @@ config RTC_DRV_LPC32XX
help
This enables support for the NXP RTC in the LPC32XX
- This driver can also be buillt as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called rtc-lpc32xx.
config RTC_DRV_PM8XXX
@@ -1706,6 +1705,17 @@ config RTC_DRV_PIC32
This driver can also be built as a module. If so, the module
will be called rtc-pic32
+config RTC_DRV_R7301
+ tristate "EPSON TOYOCOM RTC-7301SF/DG"
+ select REGMAP_MMIO
+ depends on OF && HAS_IOMEM
+ help
+ If you say yes here you get support for the EPSON TOYOCOM
+ RTC-7301SF/DG chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-r7301.
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 1ac694a330c8..f13ab1c5c222 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o
obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 84a52db9b05f..fc0fa7577636 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -363,7 +363,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc_timer_remove(rtc, &rtc->aie_timer);
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
- rtc->aie_timer.period = ktime_set(0, 0);
+ rtc->aie_timer.period = 0;
if (alarm->enabled)
err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
@@ -391,11 +391,11 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
- rtc->aie_timer.period = ktime_set(0, 0);
+ rtc->aie_timer.period = 0;
/* Alarm has to be enabled & in the future for us to enqueue it */
- if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
- rtc->aie_timer.node.expires.tv64)) {
+ if (alarm->enabled && (rtc_tm_to_ktime(now) <
+ rtc->aie_timer.node.expires)) {
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
@@ -554,7 +554,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
int count;
rtc = container_of(timer, struct rtc_device, pie_timer);
- period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ period = NSEC_PER_SEC / rtc->irq_freq;
count = hrtimer_forward_now(timer, period);
rtc_handle_legacy_irq(rtc, count, RTC_PF);
@@ -665,7 +665,7 @@ static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
return -1;
if (enabled) {
- ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
+ ktime_t period = NSEC_PER_SEC / rtc->irq_freq;
hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
}
@@ -766,7 +766,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
/* Skip over expired timers */
while (next) {
- if (next->expires.tv64 >= now.tv64)
+ if (next->expires >= now)
break;
next = timerqueue_iterate_next(next);
}
@@ -858,7 +858,7 @@ again:
__rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) {
- if (next->expires.tv64 > now.tv64)
+ if (next->expires > now)
break;
/* expire timer */
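
These conversions are mechanical once ktime_t is a plain s64 nanosecond count; a minimal illustration (assuming that definition) of why the .tv64 accessors and the ktime_set(0, ns) wrappers disappear:

#include <linux/ktime.h>
#include <linux/types.h>

/*
 * With ktime_t == s64, a relative period is just a nanosecond count and
 * expiry checks are ordinary integer comparisons:
 *
 *   ktime_set(0, NSEC_PER_SEC / freq)  becomes  NSEC_PER_SEC / freq
 *   a.tv64 < b.tv64                    becomes  a < b
 */
static bool example_expired(ktime_t expires, ktime_t now)
{
	return expires <= now;
}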
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 38aa8e1906c2..f4a96dbdabf2 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -332,14 +332,86 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
cmos_checkintr(cmos, rtc_control);
}
+static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_time now;
+
+ cmos_read_time(dev, &now);
+
+ if (!cmos->day_alrm) {
+ time64_t t_max_date;
+ time64_t t_alrm;
+
+ t_max_date = rtc_tm_to_time64(&now);
+ t_max_date += 24 * 60 * 60 - 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one day in the future\n");
+ return -EINVAL;
+ }
+ } else if (!cmos->mon_alrm) {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ if (max_date.tm_mon == 11) {
+ max_date.tm_mon = 0;
+ max_date.tm_year += 1;
+ } else {
+ max_date.tm_mon += 1;
+ }
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one month in the future\n");
+ return -EINVAL;
+ }
+ } else {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ max_date.tm_year += 1;
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one year in the future\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char mon, mday, hrs, min, sec, rtc_control;
+ int ret;
if (!is_valid_irq(cmos->irq))
return -EIO;
+ ret = cmos_validate_alarm(dev, t);
+ if (ret < 0)
+ return ret;
+
mon = t->time.tm_mon + 1;
mday = t->time.tm_mday;
hrs = t->time.tm_hour;
@@ -707,9 +779,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_unlock_irq(&rtc_lock);
- /* FIXME:
- * <asm-generic/rtc.h> doesn't know 12-hour mode either.
- */
if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) {
dev_warn(dev, "only 24-hr supported\n");
retval = -ENXIO;
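
A compressed restatement of the new range check for the no-day-alarm case, as a standalone helper (hypothetical, for illustration; it mirrors the first branch of cmos_validate_alarm() above):

#include <linux/rtc.h>

/*
 * Without a day-of-month alarm register the CMOS RTC can only match
 * hh:mm:ss, so an alarm more than 24h - 1s past "now" would fire early.
 */
static bool example_alarm_within_one_day(struct rtc_time *now,
					 struct rtc_time *alrm)
{
	time64_t t_now = rtc_tm_to_time64(now);
	time64_t t_alrm = rtc_tm_to_time64(alrm);

	return t_alrm <= t_now + 24 * 60 * 60 - 1;
}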
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4e31036ee259..4ad97be48043 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -191,6 +192,26 @@ static const struct i2c_device_id ds1307_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ds1307_acpi_ids[] = {
+ { .id = "DS1307", .driver_data = ds_1307 },
+ { .id = "DS1337", .driver_data = ds_1337 },
+ { .id = "DS1338", .driver_data = ds_1338 },
+ { .id = "DS1339", .driver_data = ds_1339 },
+ { .id = "DS1388", .driver_data = ds_1388 },
+ { .id = "DS1340", .driver_data = ds_1340 },
+ { .id = "DS3231", .driver_data = ds_3231 },
+ { .id = "M41T00", .driver_data = m41t00 },
+ { .id = "MCP7940X", .driver_data = mcp794xx },
+ { .id = "MCP7941X", .driver_data = mcp794xx },
+ { .id = "PT7C4338", .driver_data = ds_1307 },
+ { .id = "RX8025", .driver_data = rx_8025 },
+ { .id = "ISL12057", .driver_data = ds_1337 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
+#endif
+
/*----------------------------------------------------------------------*/
#define BLOCK_DATA_MAX_TRIES 10
@@ -874,17 +895,17 @@ static u8 do_trickle_setup_ds1339(struct i2c_client *client,
return setup;
}
-static void ds1307_trickle_of_init(struct i2c_client *client,
- struct chip_desc *chip)
+static void ds1307_trickle_init(struct i2c_client *client,
+ struct chip_desc *chip)
{
uint32_t ohms = 0;
bool diode = true;
if (!chip->do_trickle_setup)
goto out;
- if (of_property_read_u32(client->dev.of_node, "trickle-resistor-ohms" , &ohms))
+ if (device_property_read_u32(&client->dev, "trickle-resistor-ohms", &ohms))
goto out;
- if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ if (device_property_read_bool(&client->dev, "trickle-diode-disable"))
diode = false;
chip->trickle_charger_setup = chip->do_trickle_setup(client,
ohms, diode);
@@ -1268,7 +1289,7 @@ static int ds1307_probe(struct i2c_client *client,
struct ds1307 *ds1307;
int err = -ENODEV;
int tmp, wday;
- struct chip_desc *chip = &chips[id->driver_data];
+ struct chip_desc *chip;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
bool want_irq = false;
bool ds1307_can_wakeup_device = false;
@@ -1297,11 +1318,23 @@ static int ds1307_probe(struct i2c_client *client,
i2c_set_clientdata(client, ds1307);
ds1307->client = client;
- ds1307->type = id->driver_data;
+ if (id) {
+ chip = &chips[id->driver_data];
+ ds1307->type = id->driver_data;
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids),
+ &client->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip = &chips[acpi_id->driver_data];
+ ds1307->type = acpi_id->driver_data;
+ }
- if (!pdata && client->dev.of_node)
- ds1307_trickle_of_init(client, chip);
- else if (pdata && pdata->trickle_charger_setup)
+ if (!pdata)
+ ds1307_trickle_init(client, chip);
+ else if (pdata->trickle_charger_setup)
chip->trickle_charger_setup = pdata->trickle_charger_setup;
if (chip->trickle_charger_setup && chip->trickle_charger_reg) {
@@ -1678,6 +1711,7 @@ static int ds1307_remove(struct i2c_client *client)
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
+ .acpi_match_table = ACPI_PTR(ds1307_acpi_ids),
},
.probe = ds1307_probe,
.remove = ds1307_remove,
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 3b3049c8c9e0..52429f0a57cc 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -89,10 +89,8 @@ static int ds1374_read_rtc(struct i2c_client *client, u32 *time,
int ret;
int i;
- if (nbytes > 4) {
- WARN_ON(1);
+ if (WARN_ON(nbytes > 4))
return -EINVAL;
- }
ret = i2c_smbus_read_i2c_block_data(client, reg, nbytes, buf);
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 8d8049bdfaf6..67b56b80dc70 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -67,7 +67,7 @@
#define DSR_ETAD (1 << 21) /* External tamper A detected */
#define DSR_EBD (1 << 20) /* External boot detected */
#define DSR_SAD (1 << 19) /* SCC alarm detected */
-#define DSR_TTD (1 << 18) /* Temperatur tamper detected */
+#define DSR_TTD (1 << 18) /* Temperature tamper detected */
#define DSR_CTD (1 << 17) /* Clock tamper detected */
#define DSR_VTD (1 << 16) /* Voltage tamper detected */
#define DSR_WBF (1 << 10) /* Write Busy Flag (synchronous) */
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 5e14651b71a8..72918c1ba092 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -14,10 +14,12 @@
*
*/
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -27,8 +29,14 @@
#define JZ_REG_RTC_SEC_ALARM 0x08
#define JZ_REG_RTC_REGULATOR 0x0C
#define JZ_REG_RTC_HIBERNATE 0x20
+#define JZ_REG_RTC_WAKEUP_FILTER 0x24
+#define JZ_REG_RTC_RESET_COUNTER 0x28
#define JZ_REG_RTC_SCRATCHPAD 0x34
+/* The following are present on the jz4780 */
+#define JZ_REG_RTC_WENR 0x3C
+#define JZ_RTC_WENR_WEN BIT(31)
+
#define JZ_RTC_CTRL_WRDY BIT(7)
#define JZ_RTC_CTRL_1HZ BIT(6)
#define JZ_RTC_CTRL_1HZ_IRQ BIT(5)
@@ -37,16 +45,34 @@
#define JZ_RTC_CTRL_AE BIT(2)
#define JZ_RTC_CTRL_ENABLE BIT(0)
+/* Magic value to enable writes on jz4780 */
+#define JZ_RTC_WENR_MAGIC 0xA55A
+
+#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
+#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
+
+enum jz4740_rtc_type {
+ ID_JZ4740,
+ ID_JZ4780,
+};
+
struct jz4740_rtc {
void __iomem *base;
+ enum jz4740_rtc_type type;
struct rtc_device *rtc;
+ struct clk *clk;
int irq;
spinlock_t lock;
+
+ unsigned int min_wakeup_pin_assert_time;
+ unsigned int reset_pin_assert_time;
};
+static struct device *dev_for_power_off;
+
static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
{
return readl(rtc->base + reg);
@@ -64,11 +90,33 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
return timeout ? 0 : -EIO;
}
+static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
+{
+ uint32_t ctrl;
+ int ret, timeout = 1000;
+
+ ret = jz4740_rtc_wait_write_ready(rtc);
+ if (ret != 0)
+ return ret;
+
+ writel(JZ_RTC_WENR_MAGIC, rtc->base + JZ_REG_RTC_WENR);
+
+ do {
+ ctrl = readl(rtc->base + JZ_REG_RTC_WENR);
+ } while (!(ctrl & JZ_RTC_WENR_WEN) && --timeout);
+
+ return timeout ? 0 : -EIO;
+}
+
static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
uint32_t val)
{
- int ret;
- ret = jz4740_rtc_wait_write_ready(rtc);
+ int ret = 0;
+
+ if (rtc->type >= ID_JZ4780)
+ ret = jz4780_rtc_enable_write(rtc);
+ if (ret == 0)
+ ret = jz4740_rtc_wait_write_ready(rtc);
if (ret == 0)
writel(val, rtc->base + reg);
@@ -203,12 +251,57 @@ static irqreturn_t jz4740_rtc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-void jz4740_rtc_poweroff(struct device *dev)
+static void jz4740_rtc_poweroff(struct device *dev)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1);
}
-EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff);
+
+static void jz4740_rtc_power_off(void)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev_for_power_off);
+ unsigned long rtc_rate;
+ unsigned long wakeup_filter_ticks;
+ unsigned long reset_counter_ticks;
+
+ clk_prepare_enable(rtc->clk);
+
+ rtc_rate = clk_get_rate(rtc->clk);
+
+ /*
+ * Set minimum wakeup pin assertion time: 100 ms.
+ * Range is 0 to 2 sec if RTC is clocked at 32 kHz.
+ */
+ wakeup_filter_ticks =
+ (rtc->min_wakeup_pin_assert_time * rtc_rate) / 1000;
+ if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
+ wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
+ else
+ wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
+ jz4740_rtc_reg_write(rtc,
+ JZ_REG_RTC_WAKEUP_FILTER, wakeup_filter_ticks);
+
+ /*
+ * Set reset pin low-level assertion time after wakeup: 60 ms.
+ * Range is 0 to 125 ms if RTC is clocked at 32 kHz.
+ */
+ reset_counter_ticks = (rtc->reset_pin_assert_time * rtc_rate) / 1000;
+ if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
+ reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
+ else
+ reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
+ jz4740_rtc_reg_write(rtc,
+ JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
+
+ jz4740_rtc_poweroff(dev_for_power_off);
+ machine_halt();
+}
+
+static const struct of_device_id jz4740_rtc_of_match[] = {
+ { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
+ {},
+};
static int jz4740_rtc_probe(struct platform_device *pdev)
{
@@ -216,11 +309,20 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
struct jz4740_rtc *rtc;
uint32_t scratchpad;
struct resource *mem;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ const struct of_device_id *of_id = of_match_device(
+ jz4740_rtc_of_match, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ if (of_id)
+ rtc->type = (enum jz4740_rtc_type)of_id->data;
+ else
+ rtc->type = id->driver_data;
+
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0) {
dev_err(&pdev->dev, "Failed to get platform irq\n");
@@ -232,6 +334,12 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
+ rtc->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(rtc->clk)) {
+ dev_err(&pdev->dev, "Failed to get RTC clock\n");
+ return PTR_ERR(rtc->clk);
+ }
+
spin_lock_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
@@ -263,6 +371,27 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
}
}
+ if (np && of_device_is_system_power_controller(np)) {
+ if (!pm_power_off) {
+ /* Default: 60ms */
+ rtc->reset_pin_assert_time = 60;
+ of_property_read_u32(np, "reset-pin-assert-time-ms",
+ &rtc->reset_pin_assert_time);
+
+ /* Default: 100ms */
+ rtc->min_wakeup_pin_assert_time = 100;
+ of_property_read_u32(np,
+ "min-wakeup-pin-assert-time-ms",
+ &rtc->min_wakeup_pin_assert_time);
+
+ dev_for_power_off = &pdev->dev;
+ pm_power_off = jz4740_rtc_power_off;
+ } else {
+ dev_warn(&pdev->dev,
+ "Poweroff handler already present!\n");
+ }
+ }
+
return 0;
}
@@ -295,17 +424,20 @@ static const struct dev_pm_ops jz4740_pm_ops = {
#define JZ4740_RTC_PM_OPS NULL
#endif /* CONFIG_PM */
+static const struct platform_device_id jz4740_rtc_ids[] = {
+ { "jz4740-rtc", ID_JZ4740 },
+ { "jz4780-rtc", ID_JZ4780 },
+ {}
+};
+
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
.pm = JZ4740_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(jz4740_rtc_of_match),
},
+ .id_table = jz4740_rtc_ids,
};
-module_platform_driver(jz4740_rtc_driver);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
-MODULE_ALIAS("platform:jz4740-rtc");
+builtin_platform_driver(jz4740_rtc_driver);
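
The wakeup-filter and reset-counter writes above are simple ms-to-ticks conversions clamped to the register mask; a standalone sketch of the arithmetic (the helper and macro names are illustrative; numbers assume the 32768 Hz RTC clock and the 100 ms default mentioned in the comments):

#define EXAMPLE_WAKEUP_FILTER_MASK	0x0000FFE0

static unsigned long example_wakeup_filter_ticks(unsigned long rate_hz,
						 unsigned long assert_ms)
{
	unsigned long ticks = (assert_ms * rate_hz) / 1000;	/* 3276 for 100 ms @ 32768 Hz */

	if (ticks < EXAMPLE_WAKEUP_FILTER_MASK)
		ticks &= EXAMPLE_WAKEUP_FILTER_MASK;	/* 3264: the low 5 bits are dropped by the mask */
	else
		ticks = EXAMPLE_WAKEUP_FILTER_MASK;

	return ticks;
}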
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index e6bfb9c42a10..1ae7da5cfc60 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -11,7 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rtc.h>
static const unsigned char rtc_days_in_month[] = {
@@ -148,5 +148,3 @@ struct rtc_time rtc_ktime_to_tm(ktime_t kt)
return ret;
}
EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 4021fd04cb0a..ce75e421ba00 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -12,7 +12,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * */
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -21,6 +21,8 @@
#include <linux/spi/spi.h>
#include <linux/rtc.h>
#include <linux/of.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
/* MCP795 Instructions, see datasheet table 3-1 */
#define MCP795_EEREAD 0x03
@@ -29,7 +31,7 @@
#define MCP795_EEWREN 0x06
#define MCP795_SRREAD 0x05
#define MCP795_SRWRITE 0x01
-#define MCP795_READ 0x13
+#define MCP795_READ 0x13
#define MCP795_WRITE 0x12
#define MCP795_UNLOCK 0x14
#define MCP795_IDWRITE 0x32
@@ -37,8 +39,17 @@
#define MCP795_CLRWDT 0x44
#define MCP795_CLRRAM 0x54
-#define MCP795_ST_BIT 0x80
-#define MCP795_24_BIT 0x40
+/* MCP795 RTCC registers, see datasheet table 4-1 */
+#define MCP795_REG_SECONDS 0x01
+#define MCP795_REG_DAY 0x04
+#define MCP795_REG_MONTH 0x06
+#define MCP795_REG_CONTROL 0x08
+
+#define MCP795_ST_BIT BIT(7)
+#define MCP795_24_BIT BIT(6)
+#define MCP795_LP_BIT BIT(5)
+#define MCP795_EXTOSC_BIT BIT(3)
+#define MCP795_OSCON_BIT BIT(5)
static int mcp795_rtcc_read(struct device *dev, u8 addr, u8 *buf, u8 count)
{
@@ -93,30 +104,97 @@ static int mcp795_rtcc_set_bits(struct device *dev, u8 addr, u8 mask, u8 state)
return ret;
}
+static int mcp795_stop_oscillator(struct device *dev, bool *extosc)
+{
+ int retries = 5;
+ int ret;
+ u8 data;
+
+ ret = mcp795_rtcc_set_bits(dev, MCP795_REG_SECONDS, MCP795_ST_BIT, 0);
+ if (ret)
+ return ret;
+ ret = mcp795_rtcc_read(dev, MCP795_REG_CONTROL, &data, 1);
+ if (ret)
+ return ret;
+ *extosc = !!(data & MCP795_EXTOSC_BIT);
+ ret = mcp795_rtcc_set_bits(
+ dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, 0);
+ if (ret)
+ return ret;
+ /* wait for the OSCON bit to clear */
+ do {
+ usleep_range(700, 800);
+ ret = mcp795_rtcc_read(dev, MCP795_REG_DAY, &data, 1);
+ if (ret)
+ break;
+ if (!(data & MCP795_OSCON_BIT))
+ break;
+
+ } while (--retries);
+
+ return !retries ? -EIO : ret;
+}
+
+static int mcp795_start_oscillator(struct device *dev, bool *extosc)
+{
+ if (extosc) {
+ u8 data = *extosc ? MCP795_EXTOSC_BIT : 0;
+ int ret;
+
+ ret = mcp795_rtcc_set_bits(
+ dev, MCP795_REG_CONTROL, MCP795_EXTOSC_BIT, data);
+ if (ret)
+ return ret;
+ }
+ return mcp795_rtcc_set_bits(
+ dev, MCP795_REG_SECONDS, MCP795_ST_BIT, MCP795_ST_BIT);
+}
+
static int mcp795_set_time(struct device *dev, struct rtc_time *tim)
{
int ret;
u8 data[7];
+ bool extosc;
+
+ /* Stop RTC and store current value of EXTOSC bit */
+ ret = mcp795_stop_oscillator(dev, &extosc);
+ if (ret)
+ return ret;
/* Read first, so we can leave config bits untouched */
- ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data));
if (ret)
return ret;
- data[0] = (data[0] & 0x80) | ((tim->tm_sec / 10) << 4) | (tim->tm_sec % 10);
- data[1] = (data[1] & 0x80) | ((tim->tm_min / 10) << 4) | (tim->tm_min % 10);
- data[2] = ((tim->tm_hour / 10) << 4) | (tim->tm_hour % 10);
- data[4] = ((tim->tm_mday / 10) << 4) | ((tim->tm_mday) % 10);
- data[5] = (data[5] & 0x10) | (tim->tm_mon / 10) | (tim->tm_mon % 10);
+ data[0] = (data[0] & 0x80) | bin2bcd(tim->tm_sec);
+ data[1] = (data[1] & 0x80) | bin2bcd(tim->tm_min);
+ data[2] = bin2bcd(tim->tm_hour);
+ data[4] = bin2bcd(tim->tm_mday);
+ data[5] = (data[5] & MCP795_LP_BIT) | bin2bcd(tim->tm_mon + 1);
if (tim->tm_year > 100)
tim->tm_year -= 100;
- data[6] = ((tim->tm_year / 10) << 4) | (tim->tm_year % 10);
+ data[6] = bin2bcd(tim->tm_year);
+
+ /* Always write the date and month using a separate Write command.
+	 * This is a workaround for a known silicon issue where some combinations
+ * of date and month values may result in the date being reset to 1.
+ */
+ ret = mcp795_rtcc_write(dev, MCP795_REG_SECONDS, data, 5);
+ if (ret)
+ return ret;
- ret = mcp795_rtcc_write(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_write(dev, MCP795_REG_MONTH, &data[5], 2);
+ if (ret)
+ return ret;
+	/* Restart the RTC and restore the previous value of the EXTOSC bit.
+ * There is no need to clear EXTOSC bit when the previous value was 0
+ * because it was already cleared when stopping the RTC oscillator.
+ */
+ ret = mcp795_start_oscillator(dev, extosc ? &extosc : NULL);
if (ret)
return ret;
@@ -132,17 +210,17 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim)
int ret;
u8 data[7];
- ret = mcp795_rtcc_read(dev, 0x01, data, sizeof(data));
+ ret = mcp795_rtcc_read(dev, MCP795_REG_SECONDS, data, sizeof(data));
if (ret)
return ret;
- tim->tm_sec = ((data[0] & 0x70) >> 4) * 10 + (data[0] & 0x0f);
- tim->tm_min = ((data[1] & 0x70) >> 4) * 10 + (data[1] & 0x0f);
- tim->tm_hour = ((data[2] & 0x30) >> 4) * 10 + (data[2] & 0x0f);
- tim->tm_mday = ((data[4] & 0x30) >> 4) * 10 + (data[4] & 0x0f);
- tim->tm_mon = ((data[5] & 0x10) >> 4) * 10 + (data[5] & 0x0f);
- tim->tm_year = ((data[6] & 0xf0) >> 4) * 10 + (data[6] & 0x0f) + 100; /* Assume we are in 20xx */
+ tim->tm_sec = bcd2bin(data[0] & 0x7F);
+ tim->tm_min = bcd2bin(data[1] & 0x7F);
+ tim->tm_hour = bcd2bin(data[2] & 0x3F);
+ tim->tm_mday = bcd2bin(data[4] & 0x3F);
+ tim->tm_mon = bcd2bin(data[5] & 0x1F) - 1;
+ tim->tm_year = bcd2bin(data[6]) + 100; /* Assume we are in 20xx */
dev_dbg(dev, "Read from mcp795: %04d-%02d-%02d %02d:%02d:%02d\n",
tim->tm_year + 1900, tim->tm_mon, tim->tm_mday,
@@ -169,13 +247,13 @@ static int mcp795_probe(struct spi_device *spi)
return ret;
}
- /* Start the oscillator */
- mcp795_rtcc_set_bits(&spi->dev, 0x01, MCP795_ST_BIT, MCP795_ST_BIT);
+ /* Start the oscillator but don't set the value of EXTOSC bit */
+ mcp795_start_oscillator(&spi->dev, NULL);
/* Clear the 12 hour mode flag*/
mcp795_rtcc_set_bits(&spi->dev, 0x03, MCP795_24_BIT, 0);
rtc = devm_rtc_device_register(&spi->dev, "rtc-mcp795",
- &mcp795_rtc_ops, THIS_MODULE);
+ &mcp795_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
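
The time conversions above replace open-coded decimal splitting with the kernel's BCD helpers; a minimal equivalence sketch (the helper name is illustrative):

#include <linux/bcd.h>
#include <linux/types.h>

/*
 * For tm_sec = 37:
 *   ((37 / 10) << 4) | (37 % 10)              == bin2bcd(37)   == 0x37
 *   ((0x37 & 0x70) >> 4) * 10 + (0x37 & 0x0f) == bcd2bin(0x37) == 37
 */
static u8 example_seconds_reg(u8 old_reg, int sec)
{
	/* preserve the ST (oscillator start) bit, BCD-encode the rest */
	return (old_reg & 0x80) | bin2bcd(sec);
}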
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index efb0a08ac117..a06dff994c83 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -191,12 +191,19 @@ static int pcf85063_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct rtc_device *rtc;
+ int err;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+ err = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
+ if (err < 0) {
+ dev_err(&client->dev, "RTC chip is not present\n");
+ return err;
+ }
+
rtc = devm_rtc_device_register(&client->dev,
pcf85063_driver.driver.name,
&pcf85063_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
new file mode 100644
index 000000000000..28d540885f3d
--- /dev/null
+++ b/drivers/rtc/rtc-r7301.c
@@ -0,0 +1,453 @@
+/*
+ * EPSON TOYOCOM RTC-7301SF/DG Driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Based on rtc-rp5c01.c
+ *
+ * Datasheet: http://www5.epsondevice.com/en/products/parallel/rtc7301sf.html
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define DRV_NAME "rtc-r7301"
+
+#define RTC7301_1_SEC 0x0 /* Bank 0 and Bank 1 */
+#define RTC7301_10_SEC 0x1 /* Bank 0 and Bank 1 */
+#define RTC7301_AE BIT(3)
+#define RTC7301_1_MIN 0x2 /* Bank 0 and Bank 1 */
+#define RTC7301_10_MIN 0x3 /* Bank 0 and Bank 1 */
+#define RTC7301_1_HOUR 0x4 /* Bank 0 and Bank 1 */
+#define RTC7301_10_HOUR 0x5 /* Bank 0 and Bank 1 */
+#define RTC7301_DAY_OF_WEEK 0x6 /* Bank 0 and Bank 1 */
+#define RTC7301_1_DAY 0x7 /* Bank 0 and Bank 1 */
+#define RTC7301_10_DAY 0x8 /* Bank 0 and Bank 1 */
+#define RTC7301_1_MONTH 0x9 /* Bank 0 */
+#define RTC7301_10_MONTH 0xa /* Bank 0 */
+#define RTC7301_1_YEAR 0xb /* Bank 0 */
+#define RTC7301_10_YEAR 0xc /* Bank 0 */
+#define RTC7301_100_YEAR 0xd /* Bank 0 */
+#define RTC7301_1000_YEAR 0xe /* Bank 0 */
+#define RTC7301_ALARM_CONTROL 0xe /* Bank 1 */
+#define RTC7301_ALARM_CONTROL_AIE BIT(0)
+#define RTC7301_ALARM_CONTROL_AF BIT(1)
+#define RTC7301_TIMER_CONTROL 0xe /* Bank 2 */
+#define RTC7301_TIMER_CONTROL_TIE BIT(0)
+#define RTC7301_TIMER_CONTROL_TF BIT(1)
+#define RTC7301_CONTROL 0xf /* All banks */
+#define RTC7301_CONTROL_BUSY BIT(0)
+#define RTC7301_CONTROL_STOP BIT(1)
+#define RTC7301_CONTROL_BANK_SEL_0 BIT(2)
+#define RTC7301_CONTROL_BANK_SEL_1 BIT(3)
+
+struct rtc7301_priv {
+ struct regmap *regmap;
+ int irq;
+ spinlock_t lock;
+ u8 bank;
+};
+
+static const struct regmap_config rtc7301_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .reg_stride = 4,
+};
+
+static u8 rtc7301_read(struct rtc7301_priv *priv, unsigned int reg)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+ unsigned int val;
+
+ regmap_read(priv->regmap, reg_stride * reg, &val);
+
+ return val & 0xf;
+}
+
+static void rtc7301_write(struct rtc7301_priv *priv, u8 val, unsigned int reg)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+
+ regmap_write(priv->regmap, reg_stride * reg, val);
+}
+
+static void rtc7301_update_bits(struct rtc7301_priv *priv, unsigned int reg,
+ u8 mask, u8 val)
+{
+ int reg_stride = regmap_get_reg_stride(priv->regmap);
+
+ regmap_update_bits(priv->regmap, reg_stride * reg, mask, val);
+}
+
+static int rtc7301_wait_while_busy(struct rtc7301_priv *priv)
+{
+ int retries = 100;
+
+ while (retries-- > 0) {
+ u8 val;
+
+ val = rtc7301_read(priv, RTC7301_CONTROL);
+ if (!(val & RTC7301_CONTROL_BUSY))
+ return 0;
+
+ usleep_range(200, 300);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void rtc7301_stop(struct rtc7301_priv *priv)
+{
+ rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP,
+ RTC7301_CONTROL_STOP);
+}
+
+static void rtc7301_start(struct rtc7301_priv *priv)
+{
+ rtc7301_update_bits(priv, RTC7301_CONTROL, RTC7301_CONTROL_STOP, 0);
+}
+
+static void rtc7301_select_bank(struct rtc7301_priv *priv, u8 bank)
+{
+ u8 val = 0;
+
+ if (bank == priv->bank)
+ return;
+
+ if (bank & BIT(0))
+ val |= RTC7301_CONTROL_BANK_SEL_0;
+ if (bank & BIT(1))
+ val |= RTC7301_CONTROL_BANK_SEL_1;
+
+ rtc7301_update_bits(priv, RTC7301_CONTROL,
+ RTC7301_CONTROL_BANK_SEL_0 |
+ RTC7301_CONTROL_BANK_SEL_1, val);
+
+ priv->bank = bank;
+}
+
+static void rtc7301_get_time(struct rtc7301_priv *priv, struct rtc_time *tm,
+ bool alarm)
+{
+ int year;
+
+ tm->tm_sec = rtc7301_read(priv, RTC7301_1_SEC);
+ tm->tm_sec += (rtc7301_read(priv, RTC7301_10_SEC) & ~RTC7301_AE) * 10;
+ tm->tm_min = rtc7301_read(priv, RTC7301_1_MIN);
+ tm->tm_min += (rtc7301_read(priv, RTC7301_10_MIN) & ~RTC7301_AE) * 10;
+ tm->tm_hour = rtc7301_read(priv, RTC7301_1_HOUR);
+ tm->tm_hour += (rtc7301_read(priv, RTC7301_10_HOUR) & ~RTC7301_AE) * 10;
+ tm->tm_mday = rtc7301_read(priv, RTC7301_1_DAY);
+ tm->tm_mday += (rtc7301_read(priv, RTC7301_10_DAY) & ~RTC7301_AE) * 10;
+
+ if (alarm) {
+ tm->tm_wday = -1;
+ tm->tm_mon = -1;
+ tm->tm_year = -1;
+ tm->tm_yday = -1;
+ tm->tm_isdst = -1;
+ return;
+ }
+
+ tm->tm_wday = (rtc7301_read(priv, RTC7301_DAY_OF_WEEK) & ~RTC7301_AE);
+ tm->tm_mon = rtc7301_read(priv, RTC7301_10_MONTH) * 10 +
+ rtc7301_read(priv, RTC7301_1_MONTH) - 1;
+ year = rtc7301_read(priv, RTC7301_1000_YEAR) * 1000 +
+ rtc7301_read(priv, RTC7301_100_YEAR) * 100 +
+ rtc7301_read(priv, RTC7301_10_YEAR) * 10 +
+ rtc7301_read(priv, RTC7301_1_YEAR);
+
+ tm->tm_year = year - 1900;
+}
+
+static void rtc7301_write_time(struct rtc7301_priv *priv, struct rtc_time *tm,
+ bool alarm)
+{
+ int year;
+
+ rtc7301_write(priv, tm->tm_sec % 10, RTC7301_1_SEC);
+ rtc7301_write(priv, tm->tm_sec / 10, RTC7301_10_SEC);
+
+ rtc7301_write(priv, tm->tm_min % 10, RTC7301_1_MIN);
+ rtc7301_write(priv, tm->tm_min / 10, RTC7301_10_MIN);
+
+ rtc7301_write(priv, tm->tm_hour % 10, RTC7301_1_HOUR);
+ rtc7301_write(priv, tm->tm_hour / 10, RTC7301_10_HOUR);
+
+ rtc7301_write(priv, tm->tm_mday % 10, RTC7301_1_DAY);
+ rtc7301_write(priv, tm->tm_mday / 10, RTC7301_10_DAY);
+
+	/* The day of week is a don't-care in the alarm registers */
+ rtc7301_write(priv, alarm ? RTC7301_AE : tm->tm_wday,
+ RTC7301_DAY_OF_WEEK);
+
+ if (alarm)
+ return;
+
+ rtc7301_write(priv, (tm->tm_mon + 1) % 10, RTC7301_1_MONTH);
+ rtc7301_write(priv, (tm->tm_mon + 1) / 10, RTC7301_10_MONTH);
+
+ year = tm->tm_year + 1900;
+
+ rtc7301_write(priv, year % 10, RTC7301_1_YEAR);
+ rtc7301_write(priv, (year / 10) % 10, RTC7301_10_YEAR);
+ rtc7301_write(priv, (year / 100) % 10, RTC7301_100_YEAR);
+ rtc7301_write(priv, year / 1000, RTC7301_1000_YEAR);
+}
+
+static void rtc7301_alarm_irq(struct rtc7301_priv *priv, unsigned int enabled)
+{
+ rtc7301_update_bits(priv, RTC7301_ALARM_CONTROL,
+ RTC7301_ALARM_CONTROL_AF |
+ RTC7301_ALARM_CONTROL_AIE,
+ enabled ? RTC7301_ALARM_CONTROL_AIE : 0);
+}
+
+static int rtc7301_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 0);
+
+ err = rtc7301_wait_while_busy(priv);
+ if (!err)
+ rtc7301_get_time(priv, tm, false);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err ? err : rtc_valid_tm(tm);
+}
+
+static int rtc7301_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_stop(priv);
+ usleep_range(200, 300);
+ rtc7301_select_bank(priv, 0);
+ rtc7301_write_time(priv, tm, false);
+ rtc7301_start(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 alrm_ctrl;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_get_time(priv, &alarm->time, true);
+
+ alrm_ctrl = rtc7301_read(priv, RTC7301_ALARM_CONTROL);
+
+ alarm->enabled = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AIE);
+ alarm->pending = !!(alrm_ctrl & RTC7301_ALARM_CONTROL_AF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_write_time(priv, &alarm->time, true);
+ rtc7301_alarm_irq(priv, alarm->enabled);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int rtc7301_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+ rtc7301_alarm_irq(priv, enabled);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc7301_rtc_ops = {
+ .read_time = rtc7301_read_time,
+ .set_time = rtc7301_set_time,
+ .read_alarm = rtc7301_read_alarm,
+ .set_alarm = rtc7301_set_alarm,
+ .alarm_irq_enable = rtc7301_alarm_irq_enable,
+};
+
+static irqreturn_t rtc7301_irq_handler(int irq, void *dev_id)
+{
+ struct rtc_device *rtc = dev_id;
+ struct rtc7301_priv *priv = dev_get_drvdata(rtc->dev.parent);
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u8 alrm_ctrl;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 1);
+
+ alrm_ctrl = rtc7301_read(priv, RTC7301_ALARM_CONTROL);
+ if (alrm_ctrl & RTC7301_ALARM_CONTROL_AF) {
+ ret = IRQ_HANDLED;
+ rtc7301_alarm_irq(priv, false);
+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static void rtc7301_init(struct rtc7301_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rtc7301_select_bank(priv, 2);
+ rtc7301_write(priv, 0, RTC7301_TIMER_CONTROL);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int __init rtc7301_rtc_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ void __iomem *regs;
+ struct rtc7301_priv *priv;
+ struct rtc_device *rtc;
+ int ret;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->regmap = devm_regmap_init_mmio(&dev->dev, regs,
+ &rtc7301_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->irq = platform_get_irq(dev, 0);
+
+ spin_lock_init(&priv->lock);
+ priv->bank = -1;
+
+ rtc7301_init(priv);
+
+ platform_set_drvdata(dev, priv);
+
+ rtc = devm_rtc_device_register(&dev->dev, DRV_NAME, &rtc7301_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ if (priv->irq > 0) {
+ ret = devm_request_irq(&dev->dev, priv->irq,
+ rtc7301_irq_handler, IRQF_SHARED,
+ dev_name(&dev->dev), rtc);
+ if (ret) {
+ priv->irq = 0;
+ dev_err(&dev->dev, "unable to request IRQ\n");
+ } else {
+ device_set_wakeup_capable(&dev->dev, true);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int rtc7301_suspend(struct device *dev)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+
+ return 0;
+}
+
+static int rtc7301_resume(struct device *dev)
+{
+ struct rtc7301_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(rtc7301_pm_ops, rtc7301_suspend, rtc7301_resume);
+
+static const struct of_device_id rtc7301_dt_match[] = {
+ { .compatible = "epson,rtc7301sf" },
+ { .compatible = "epson,rtc7301dg" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtc7301_dt_match);
+
+static struct platform_driver rtc7301_rtc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = rtc7301_dt_match,
+ .pm = &rtc7301_pm_ops,
+ },
+};
+
+module_platform_driver_probe(rtc7301_rtc_driver, rtc7301_rtc_probe);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EPSON TOYOCOM RTC-7301SF/DG Driver");
+MODULE_ALIAS("platform:rtc-r7301");
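
The regmap configuration in this driver (.val_bits = 8, .reg_stride = 4) places each 4-bit register in its own 32-bit-aligned byte; a minimal sketch of the resulting address math (the helper is illustrative, not part of the driver):

#include <linux/io.h>
#include <linux/types.h>

static u8 example_read_digit(void __iomem *base, unsigned int reg)
{
	/* register index N lives at byte offset 4 * N; only the low nibble is data */
	return readb(base + 4 * reg) & 0xf;
}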
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 83a057a03060..7fc36973fa33 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -1,20 +1,18 @@
/* rtc-starfire.c: Starfire platform RTC driver.
*
+ * Author: David S. Miller
+ * License: GPL
+ *
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Starfire RTC driver");
-MODULE_LICENSE("GPL");
-
static u32 starfire_get_time(void)
{
static char obp_gettod[32];
@@ -57,4 +55,4 @@ static struct platform_driver starfire_rtc_driver = {
},
};
-module_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe);
+builtin_platform_driver_probe(starfire_rtc_driver, starfire_rtc_probe);
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 7c696c12f28f..11bc562eba5d 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -1,12 +1,14 @@
/* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems.
*
+ * Author: David S. Miller
+ * License: GPL
+ *
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtc.h>
@@ -98,8 +100,4 @@ static struct platform_driver sun4v_rtc_driver = {
},
};
-module_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe);
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("SUN4V RTC driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(sun4v_rtc_driver, sun4v_rtc_probe);
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 176720b7b9e5..c18c39212ce6 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -33,6 +33,10 @@
#include <linux/i2c/twl.h>
+enum twl_class {
+ TWL_4030 = 0,
+ TWL_6030,
+};
/*
* RTC block register offsets (use TWL_MODULE_RTC)
@@ -136,16 +140,30 @@ static const u8 twl6030_rtc_reg_map[] = {
#define ALL_TIME_REGS 6
/*----------------------------------------------------------------------*/
-static u8 *rtc_reg_map;
+struct twl_rtc {
+ struct device *dev;
+ struct rtc_device *rtc;
+ u8 *reg_map;
+ /*
+ * Cache the value for timer/alarm interrupts register; this is
+ * only changed by callers holding rtc ops lock (or resume).
+ */
+ unsigned char rtc_irq_bits;
+ bool wake_enabled;
+#ifdef CONFIG_PM_SLEEP
+ unsigned char irqstat;
+#endif
+ enum twl_class class;
+};
/*
* Supports 1 byte read from TWL RTC register.
*/
-static int twl_rtc_read_u8(u8 *data, u8 reg)
+static int twl_rtc_read_u8(struct twl_rtc *twl_rtc, u8 *data, u8 reg)
{
int ret;
- ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
+ ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg]));
if (ret < 0)
pr_err("Could not read TWL register %X - error %d\n", reg, ret);
return ret;
@@ -154,11 +172,11 @@ static int twl_rtc_read_u8(u8 *data, u8 reg)
/*
* Supports 1 byte write to TWL RTC registers.
*/
-static int twl_rtc_write_u8(u8 data, u8 reg)
+static int twl_rtc_write_u8(struct twl_rtc *twl_rtc, u8 data, u8 reg)
{
int ret;
- ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
+ ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (twl_rtc->reg_map[reg]));
if (ret < 0)
pr_err("Could not write TWL register %X - error %d\n",
reg, ret);
@@ -166,28 +184,22 @@ static int twl_rtc_write_u8(u8 data, u8 reg)
}
/*
- * Cache the value for timer/alarm interrupts register; this is
- * only changed by callers holding rtc ops lock (or resume).
- */
-static unsigned char rtc_irq_bits;
-
-/*
* Enable 1/second update and/or alarm interrupts.
*/
-static int set_rtc_irq_bit(unsigned char bit)
+static int set_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit)
{
unsigned char val;
int ret;
/* if the bit is set, return from here */
- if (rtc_irq_bits & bit)
+ if (twl_rtc->rtc_irq_bits & bit)
return 0;
- val = rtc_irq_bits | bit;
+ val = twl_rtc->rtc_irq_bits | bit;
val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
- ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
- rtc_irq_bits = val;
+ twl_rtc->rtc_irq_bits = val;
return ret;
}
@@ -195,19 +207,19 @@ static int set_rtc_irq_bit(unsigned char bit)
/*
* Disable update and/or alarm interrupts.
*/
-static int mask_rtc_irq_bit(unsigned char bit)
+static int mask_rtc_irq_bit(struct twl_rtc *twl_rtc, unsigned char bit)
{
unsigned char val;
int ret;
/* if the bit is clear, return from here */
- if (!(rtc_irq_bits & bit))
+ if (!(twl_rtc->rtc_irq_bits & bit))
return 0;
- val = rtc_irq_bits & ~bit;
- ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ val = twl_rtc->rtc_irq_bits & ~bit;
+ ret = twl_rtc_write_u8(twl_rtc, val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
- rtc_irq_bits = val;
+ twl_rtc->rtc_irq_bits = val;
return ret;
}
@@ -215,21 +227,23 @@ static int mask_rtc_irq_bit(unsigned char bit)
static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
int irq = platform_get_irq(pdev, 0);
- static bool twl_rtc_wake_enabled;
int ret;
if (enabled) {
- ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- if (device_can_wakeup(dev) && !twl_rtc_wake_enabled) {
+ ret = set_rtc_irq_bit(twl_rtc,
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ if (device_can_wakeup(dev) && !twl_rtc->wake_enabled) {
enable_irq_wake(irq);
- twl_rtc_wake_enabled = true;
+ twl_rtc->wake_enabled = true;
}
} else {
- ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- if (twl_rtc_wake_enabled) {
+ ret = mask_rtc_irq_bit(twl_rtc,
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ if (twl_rtc->wake_enabled) {
disable_irq_wake(irq);
- twl_rtc_wake_enabled = false;
+ twl_rtc->wake_enabled = false;
}
}
@@ -247,21 +261,23 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
*/
static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
u8 save_control;
u8 rtc_control;
- ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
return ret;
}
/* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
- if (twl_class_is_6030()) {
+ if (twl_rtc->class == TWL_6030) {
if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control,
+ REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s clr GET_TIME, error %d\n",
__func__, ret);
@@ -274,17 +290,17 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
/* for twl6030/32 enable read access to static shadowed registers */
- if (twl_class_is_6030())
+ if (twl_rtc->class == TWL_6030)
rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
- ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, rtc_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
return ret;
}
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
+ (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
@@ -292,8 +308,8 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
/* for twl6030 restore original state of rtc control register */
- if (twl_class_is_6030()) {
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (twl_rtc->class == TWL_6030) {
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
if (ret < 0) {
dev_err(dev, "%s: restore CTRL_REG, error %d\n",
__func__, ret);
@@ -313,6 +329,7 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char save_control;
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
@@ -325,18 +342,18 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[5] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
- ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
/* update all the time registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
+ (twl_rtc->reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_set_time error %d\n", ret);
goto out;
@@ -344,7 +361,7 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Start back RTC */
save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, save_control, REG_RTC_CTRL_REG);
out:
return ret;
@@ -355,11 +372,12 @@ out:
*/
static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
unsigned char rtc_data[ALL_TIME_REGS];
int ret;
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
- (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
+ twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_alarm error %d\n", ret);
return ret;
@@ -374,7 +392,7 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
/* report cached alarm enable state */
- if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
+ if (twl_rtc->rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
alm->enabled = 1;
return ret;
@@ -382,6 +400,8 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
unsigned char alarm_data[ALL_TIME_REGS];
int ret;
@@ -398,7 +418,7 @@ static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
/* update all the alarm registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
- (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
+ twl_rtc->reg_map[REG_ALARM_SECONDS_REG], ALL_TIME_REGS);
if (ret) {
dev_err(dev, "rtc_set_alarm error %d\n", ret);
goto out;
@@ -410,14 +430,15 @@ out:
return ret;
}
-static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
+static irqreturn_t twl_rtc_interrupt(int irq, void *data)
{
+ struct twl_rtc *twl_rtc = data;
unsigned long events;
int ret = IRQ_NONE;
int res;
u8 rd_reg;
- res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ res = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
/*
@@ -431,12 +452,12 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
else
events = RTC_IRQF | RTC_PF;
- res = twl_rtc_write_u8(BIT_RTC_STATUS_REG_ALARM_M,
- REG_RTC_STATUS_REG);
+ res = twl_rtc_write_u8(twl_rtc, BIT_RTC_STATUS_REG_ALARM_M,
+ REG_RTC_STATUS_REG);
if (res)
goto out;
- if (twl_class_is_4030()) {
+ if (twl_rtc->class == TWL_4030) {
/* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
* needs 2 reads to clear the interrupt. One read is done in
* do_twl_pwrirq(). Doing the second read, to clear
@@ -455,7 +476,7 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
}
/* Notify RTC core on event */
- rtc_update_irq(rtc, 1, events);
+ rtc_update_irq(twl_rtc->rtc, 1, events);
ret = IRQ_HANDLED;
out:
@@ -474,21 +495,36 @@ static const struct rtc_class_ops twl_rtc_ops = {
static int twl_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
+ struct twl_rtc *twl_rtc;
+ struct device_node *np = pdev->dev.of_node;
int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
u8 rd_reg;
+ if (!np) {
+ dev_err(&pdev->dev, "no DT info\n");
+ return -EINVAL;
+ }
+
if (irq <= 0)
return ret;
- /* Initialize the register map */
- if (twl_class_is_4030())
- rtc_reg_map = (u8 *)twl4030_rtc_reg_map;
- else
- rtc_reg_map = (u8 *)twl6030_rtc_reg_map;
+ twl_rtc = devm_kzalloc(&pdev->dev, sizeof(*twl_rtc), GFP_KERNEL);
+ if (!twl_rtc)
+ return -ENOMEM;
- ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ if (twl_class_is_4030()) {
+ twl_rtc->class = TWL_4030;
+ twl_rtc->reg_map = (u8 *)twl4030_rtc_reg_map;
+ } else if (twl_class_is_6030()) {
+ twl_rtc->class = TWL_6030;
+ twl_rtc->reg_map = (u8 *)twl6030_rtc_reg_map;
+ } else {
+ dev_err(&pdev->dev, "TWL Class not supported.\n");
+ return -EINVAL;
+ }
+
+ ret = twl_rtc_read_u8(twl_rtc, &rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
return ret;
@@ -499,11 +535,11 @@ static int twl_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
/* Clear RTC Power up reset and pending alarm interrupts */
- ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
return ret;
- if (twl_class_is_6030()) {
+ if (twl_rtc->class == TWL_6030) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
@@ -511,40 +547,42 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "Enabling TWL-RTC\n");
- ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(twl_rtc, BIT_RTC_CTRL_REG_STOP_RTC_M,
+ REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
/* ensure interrupts are disabled, bootloaders can be strange */
- ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(twl_rtc, 0, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
dev_warn(&pdev->dev, "unable to disable interrupt\n");
/* init cached IRQ enable bits */
- ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_read_u8(twl_rtc, &twl_rtc->rtc_irq_bits,
+ REG_RTC_INTERRUPTS_REG);
if (ret < 0)
return ret;
+ platform_set_drvdata(pdev, twl_rtc);
device_init_wakeup(&pdev->dev, 1);
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
+ if (IS_ERR(twl_rtc->rtc)) {
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
- return PTR_ERR(rtc);
+ PTR_ERR(twl_rtc->rtc));
+ return PTR_ERR(twl_rtc->rtc);
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl_rtc_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev_name(&rtc->dev), rtc);
+ dev_name(&twl_rtc->rtc->dev), twl_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
- platform_set_drvdata(pdev, rtc);
return 0;
}
@@ -554,10 +592,12 @@ static int twl_rtc_probe(struct platform_device *pdev)
*/
static int twl_rtc_remove(struct platform_device *pdev)
{
+ struct twl_rtc *twl_rtc = platform_get_drvdata(pdev);
+
/* leave rtc running, but disable irqs */
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
- if (twl_class_is_6030()) {
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ if (twl_rtc->class == TWL_6030) {
twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
@@ -569,40 +609,40 @@ static int twl_rtc_remove(struct platform_device *pdev)
static void twl_rtc_shutdown(struct platform_device *pdev)
{
+ struct twl_rtc *twl_rtc = platform_get_drvdata(pdev);
+
/* mask timer interrupts, but leave alarm interrupts on to enable
power-on when alarm is triggered */
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
}
#ifdef CONFIG_PM_SLEEP
-static unsigned char irqstat;
-
static int twl_rtc_suspend(struct device *dev)
{
- irqstat = rtc_irq_bits;
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
+ twl_rtc->irqstat = twl_rtc->rtc_irq_bits;
- mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ mask_rtc_irq_bit(twl_rtc, BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
return 0;
}
static int twl_rtc_resume(struct device *dev)
{
- set_rtc_irq_bit(irqstat);
+ struct twl_rtc *twl_rtc = dev_get_drvdata(dev);
+
+ set_rtc_irq_bit(twl_rtc, twl_rtc->irqstat);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(twl_rtc_pm_ops, twl_rtc_suspend, twl_rtc_resume);
-#ifdef CONFIG_OF
static const struct of_device_id twl_rtc_of_match[] = {
{.compatible = "ti,twl4030-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
-#endif
-
-MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
.probe = twl_rtc_probe,
@@ -611,7 +651,7 @@ static struct platform_driver twl4030rtc_driver = {
.driver = {
.name = "twl_rtc",
.pm = &twl_rtc_pm_ops,
- .of_match_table = of_match_ptr(twl_rtc_of_match),
+ .of_match_table = twl_rtc_of_match,
},
};
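
The rtc-twl changes above replace the driver's file-scope globals (rtc_reg_map, rtc_irq_bits, the suspend-time irqstat) with a per-device struct twl_rtc that probe publishes via platform_set_drvdata() and every callback fetches again with dev_get_drvdata(). A minimal sketch of that drvdata pattern, using hypothetical foo_rtc names rather than the actual driver code, could look like this:

#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

struct foo_rtc {
	struct rtc_device *rtc;
	u8 irq_bits;		/* cached IRQ-enable state, as twl_rtc caches it */
};

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* dev is the platform device the RTC class device was registered against */
	struct foo_rtc *priv = dev_get_drvdata(dev);

	dev_dbg(dev, "cached irq bits %02x\n", priv->irq_bits);
	/* ... read the hardware through priv instead of through globals ... */
	return 0;
}

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct foo_rtc *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* set drvdata before registering the RTC or requesting the IRQ */
	platform_set_drvdata(pdev, priv);
	return 0;
}
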
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 95f7645e3c37..774da20ceb58 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0D:
dev_warn(&device->cdev->dev,
- "FORMAT 4 - No syn byte in count "
+ "FORMAT 4 - No sync byte in count "
"address area; offset active\n");
break;
case 0x0E:
@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
case 0x0F:
dev_warn(&device->cdev->dev,
- "FORMAT 4 - No syn byte in data area; "
+ "FORMAT 4 - No sync byte in data area; "
"offset active\n");
break;
default:
@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
break;
default:
dev_warn(&device->cdev->dev,
- "FORMAT D - Reserved\n");
+ "FORMAT F - Reserved\n");
}
break;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 84ca314c87e3..dd46e96a3034 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <asm/debug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ipl.h>
/* This is ugly... */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 67bf50c9946f..ade04216c970 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -26,7 +26,7 @@
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6c5d671304b4..8713fefd794b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -20,7 +20,7 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 113c1c1fa1af..9e3419124264 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -15,7 +15,7 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_erp:"
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index e2fa759bf2ad..8b1341fb2e0d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -16,7 +16,7 @@
#include <linux/fs.h>
#include <linux/blkpg.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_gendisk:"
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9dfbd972f844..ec65c1e51c2a 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -21,7 +21,7 @@
#include <asm/ccwdev.h>
#include <asm/schid.h>
#include <asm/cmb.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bad7a196bf84..70dc2c4cd3f7 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -20,7 +20,7 @@
#include <linux/proc_fs.h>
#include <asm/debug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_proc:"
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 288f59a4147b..b9d7e755c8a3 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -41,7 +41,7 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 1b8d825623bd..9ec4ae056158 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -25,7 +25,7 @@
#include <asm/cio.h>
#include <asm/io.h>
#include <asm/ebcdic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/delay.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 7b9c50aa4cc9..82c913318b73 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -14,7 +14,7 @@
#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "keyboard.h"
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index ebdeaa53182d..027ac6ae5eea 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -23,7 +23,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 9b5d1138b2e2..571a7e352755 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -21,7 +21,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 6010cd347a08..91b26df5227d 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -13,7 +13,7 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 9259017a1295..236b736ae136 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "ctrlchar.h"
#include "sclp.h"
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 68d6ee7ae504..095481d32236 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -26,7 +26,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sclp.h"
#include "ctrlchar.h"
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 77f9b9c2f701..46ac1164f242 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -18,7 +18,7 @@
#include <linux/mtio.h>
#include <linux/compat.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define TAPE_DBF_AREA tape_core_dbf
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 272cb6cd1b2a..e5ebe2fbee23 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -24,7 +24,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "raw3270.h"
#include "tty3270.h"
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 2a67b496a9e2..65f5a794f26d 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -21,7 +21,7 @@
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "vmcp.h"
static debug_info_t *vmcp_debug;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 3167e8581994..57974a1e0e03 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -21,7 +21,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index ff18f373af9a..04aceb694d51 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f771e5e9e26b..d3b51edb056e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -23,7 +23,7 @@
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 9082476b51db..bf7f5d4c50e1 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -17,7 +17,7 @@
#include <linux/ctype.h>
#include <linux/device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ipl.h>
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index b8ab18676e69..0a7fb83f35e5 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,10 +2,11 @@
# S/390 crypto devices
#
-ap-objs := ap_bus.o
-# zcrypt_api depends on ap
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
-# msgtype* depend on zcrypt_api
-obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
-# adapter drivers depend on ap, zcrypt_api and msgtype*
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
new file mode 100644
index 000000000000..7a630047c372
--- /dev/null
+++ b/drivers/s390/crypto/ap_asm.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus inline assemblies.
+ */
+
+#ifndef _AP_ASM_H_
+#define _AP_ASM_H_
+
+#include <asm/isc.h>
+
+/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
+ return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ if (info)
+ *info = reg2;
+ return reg1;
+}
+
+/**
+ * ap_pqap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ return reg1;
+}
+
+/**
+ * ap_aqic(): Enable interruption for a specific AP.
+ * @qid: The AP queue number
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+ register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(AQIC) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ :
+ : "cc");
+ return reg1_out;
+}
+
+/**
+ * ap_qci(): Get AP configuration data
+ *
+ * Returns 0 on success, or -EOPNOTSUPP.
+ */
+static inline int ap_qci(void *config)
+{
+ register unsigned long reg0 asm ("0") = 0x04000000UL;
+ register unsigned long reg1 asm ("1") = -EINVAL;
+ register void *reg2 asm ("2") = (void *) config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2)
+ :
+ : "cc", "memory");
+
+ return reg1;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+ unsigned long long psmid,
+ void *msg, size_t length)
+{
+ struct msgblock { char _[length]; };
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+ unsigned long long *psmid,
+ void *msg, size_t length)
+{
+ struct msgblock { char _[length]; };
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+
+ asm volatile(
+ "0: .long 0xb2ae0064\n" /* DQAP */
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
+ "=m" (*(struct msgblock *) msg) : : "cc");
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
+
+#endif /* _AP_ASM_H_ */
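
The helpers in this new header are thin wrappers around the PQAP/NQAP/DQAP instructions; segment-boundary retries (condition codes 1 and 2) are handled inside the inline assembly, so callers only need to inspect the returned ap_queue_status. As a hedged illustration, a caller modelled on the ap_send() wrapper that the ap_bus.c hunks below remove (and which assumes the ap_qid_t, struct ap_queue_status and AP_RESPONSE_* definitions from ap_bus.h) might map the response codes to errno values like this:

/* hypothetical wrapper, not part of the patch */
static int example_ap_send(ap_qid_t qid, unsigned long long psmid,
			   void *msg, size_t len)
{
	struct ap_queue_status status;

	status = ap_nqap(qid, psmid, msg, len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;		/* message accepted by the queue */
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;		/* transient, caller may retry */
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;		/* queue is gone */
	}
}
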
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f407b4f9d0ba..5fa699192864 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -46,8 +46,12 @@
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
#include "ap_bus.h"
+#include "ap_asm.h"
+#include "ap_debug.h"
/*
* Module description.
@@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt");
* Module parameter
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);
@@ -70,13 +75,21 @@ static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
-static struct device *ap_root_device = NULL;
+static struct device *ap_root_device;
+
+DEFINE_SPINLOCK(ap_list_lock);
+LIST_HEAD(ap_card_list);
+
static struct ap_config_info *ap_configuration;
-static DEFINE_SPINLOCK(ap_device_list_lock);
-static LIST_HEAD(ap_device_list);
static bool initialised;
/*
+ * AP bus related debug feature things.
+ */
+static struct dentry *ap_dbf_root;
+debug_info_t *ap_dbf_info;
+
+/*
* Workqueue timer for bus rescan.
*/
static struct timer_list ap_config_timer;
@@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
*/
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
-static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void)
}
/**
- * ap_intructions_available() - Test if AP instructions are available.
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
*
- * Returns 0 if the AP instructions are installed.
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
*/
-static inline int ap_instructions_available(void)
+void *ap_airq_ptr(void)
{
- register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
- register unsigned long reg1 asm ("1") = -ENODEV;
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
- return reg1;
+ if (ap_using_interrupts())
+ return ap_airq.lsi_ptr;
+ return NULL;
}
/**
@@ -169,19 +175,6 @@ static int ap_configuration_available(void)
return test_facility(12);
}
-static inline struct ap_queue_status
-__pqap_tapq(ap_qid_t qid, unsigned long *info)
-{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- *info = reg2;
- return reg1;
-}
-
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info)
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, unsigned long *info)
{
- struct ap_queue_status aqs;
- unsigned long _info;
-
if (test_facility(15))
qid |= 1UL << 23; /* set APFT T bit*/
- aqs = __pqap_tapq(qid, &_info);
- if (info)
- *info = _info;
- return aqs;
-}
-
-/**
- * ap_reset_queue(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
-{
- register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_queue_interruption_control(): Enable interruption for a specific AP.
- * @qid: The AP queue number
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status
-ap_queue_interruption_control(ap_qid_t qid, void *ind)
-{
- register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
- register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
- register struct ap_queue_status reg1_out asm ("1");
- register void *reg2 asm ("2") = ind;
- asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- :
- : "cc" );
- return reg1_out;
-}
-
-/**
- * ap_query_configuration(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int __ap_query_configuration(void)
-{
- register unsigned long reg0 asm ("0") = 0x04000000UL;
- register unsigned long reg1 asm ("1") = -EINVAL;
- register void *reg2 asm ("2") = (void *) ap_configuration;
-
- asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2)
- :
- : "cc");
-
- return reg1;
+ return ap_tapq(qid, info);
}
static inline int ap_query_configuration(void)
{
if (!ap_configuration)
return -EOPNOTSUPP;
- return __ap_query_configuration();
+ return ap_qci(ap_configuration);
}
/**
@@ -331,162 +255,6 @@ static inline int ap_test_config_domain(unsigned int domain)
}
/**
- * ap_queue_enable_interruption(): Enable interruption on an AP.
- * @qid: The AP queue number
- * @ind: the notification indicator byte
- *
- * Enables interruption on AP queue via ap_queue_interruption_control(). Based
- * on the return value it waits a while and tests the AP queue if interrupts
- * have been switched on using ap_test_queue().
- */
-static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
-{
- struct ap_queue_status status;
-
- status = ap_queue_interruption_control(ap_dev->qid, ind);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- case AP_RESPONSE_OTHERWISE_CHANGED:
- return 0;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- case AP_RESPONSE_INVALID_ADDRESS:
- pr_err("Registering adapter interrupts for AP %d failed\n",
- AP_QID_DEVICE(ap_dev->qid));
- return -EOPNOTSUPP;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
- default:
- return -EBUSY;
- }
-}
-
-static inline struct ap_queue_status
-__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
- : "cc");
- return reg1;
-}
-
-/**
- * __ap_send(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @special: Special Bit
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
- unsigned int special)
-{
- if (special == 1)
- qid |= 0x400000UL;
- return __nqap(qid, psmid, msg, length);
-}
-
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
- struct ap_queue_status status;
-
- status = __ap_send(qid, psmid, msg, length, 0);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_Q_FULL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- return -EINVAL;
- default: /* Device is gone. */
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_send);
-
-/**
- * __ap_recv(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status
-__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
-
-
- asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
- "=m" (*(msgblock *) msg) : : "cc" );
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
- return reg1;
-}
-
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
- struct ap_queue_status status;
-
- if (msg == NULL)
- return -EINVAL;
- status = __ap_recv(qid, psmid, msg, length);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (status.queue_empty)
- return -ENOENT;
- return -EBUSY;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_recv);
-
-/**
* ap_query_queue(): Check if an AP queue is available.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
@@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
unsigned long info;
int nd;
- if (!ap_test_config_card_id(AP_QID_DEVICE(qid)))
+ if (!ap_test_config_card_id(AP_QID_CARD(qid)))
return -ENODEV;
status = ap_test_queue(qid, &info);
@@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
*facilities = (unsigned int)(info >> 32);
/* Update maximum domain id */
nd = (info >> 16) & 0xff;
+ /* if N bit is available, z13 and newer */
if ((info & (1UL << 57)) && nd > 0)
ap_max_domain_id = nd;
+ else /* older machine types */
+ ap_max_domain_id = 15;
+ switch (*device_type) {
+ /* For CEX2 and CEX3 the available functions
+ * are not reflected by the facilities bits.
+ * Instead they are coded into the type. So here
+ * the function bits are derived from the type.
+ */
+ case AP_DEVICE_TYPE_CEX2A:
+ case AP_DEVICE_TYPE_CEX3A:
+ *facilities |= 0x08000000;
+ break;
+ case AP_DEVICE_TYPE_CEX2C:
+ case AP_DEVICE_TYPE_CEX3C:
+ *facilities |= 0x10000000;
+ break;
+ default:
+ break;
+ }
return 0;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
@@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
}
}
-/* State machine definitions and helpers */
-
-static void ap_sm_wait(enum ap_wait wait)
+void ap_wait(enum ap_wait wait)
{
ktime_t hr_time;
@@ -547,7 +333,7 @@ static void ap_sm_wait(enum ap_wait wait)
case AP_WAIT_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
- hr_time = ktime_set(0, poll_timeout);
+ hr_time = poll_timeout;
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
@@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait)
}
}
-static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
-{
- return AP_WAIT_NONE;
-}
-
-/**
- * ap_sm_recv(): Receive pending reply messages from an AP device but do
- * not change the state of the device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- struct ap_message *ap_msg;
-
- status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
- ap_dev->reply->message, ap_dev->reply->length);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- atomic_dec(&ap_poll_requests);
- ap_dev->queue_count--;
- if (ap_dev->queue_count > 0)
- mod_timer(&ap_dev->timeout,
- jiffies + ap_dev->drv->request_timeout);
- list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
- if (ap_msg->psmid != ap_dev->reply->psmid)
- continue;
- list_del_init(&ap_msg->list);
- ap_dev->pendingq_count--;
- ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
- break;
- }
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (!status.queue_empty || ap_dev->queue_count <= 0)
- break;
- /* The card shouldn't forget requests but who knows. */
- atomic_sub(ap_dev->queue_count, &ap_poll_requests);
- ap_dev->queue_count = 0;
- list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
- ap_dev->requestq_count += ap_dev->pendingq_count;
- ap_dev->pendingq_count = 0;
- break;
- default:
- break;
- }
- return status;
-}
-
-/**
- * ap_sm_read(): Receive pending reply messages from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- if (!ap_dev->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(ap_dev);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0) {
- ap_dev->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
- }
- ap_dev->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_INTERRUPT;
- ap_dev->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP device
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- if (!ap_dev->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(ap_dev);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fall through */
- default:
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_write(): Send messages from the request queue to an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- struct ap_message *ap_msg;
-
- if (ap_dev->requestq_count <= 0)
- return AP_WAIT_NONE;
- /* Start the next request on the queue. */
- ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
- status = __ap_send(ap_dev->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length, ap_msg->special);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- atomic_inc(&ap_poll_requests);
- ap_dev->queue_count++;
- if (ap_dev->queue_count == 1)
- mod_timer(&ap_dev->timeout,
- jiffies + ap_dev->drv->request_timeout);
- list_move_tail(&ap_msg->list, &ap_dev->pendingq);
- ap_dev->requestq_count--;
- ap_dev->pendingq_count++;
- if (ap_dev->queue_count < ap_dev->queue_depth) {
- ap_dev->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
- }
- /* fall through */
- case AP_RESPONSE_Q_FULL:
- ap_dev->state = AP_STATE_QUEUE_FULL;
- return AP_WAIT_INTERRUPT;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- ap_dev->state = AP_STATE_RESET_WAIT;
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_MESSAGE_TOO_BIG:
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- list_del_init(&ap_msg->list);
- ap_dev->requestq_count--;
- ap_msg->rc = -EINVAL;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- return AP_WAIT_AGAIN;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_read_write(): Send and receive messages to/from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
-{
- return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
-}
-
-/**
- * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
- *
- * Submit the Reset command to an AP queue.
- */
-static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
-
- status = ap_reset_queue(ap_dev->qid);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- ap_dev->state = AP_STATE_RESET_WAIT;
- ap_dev->interrupt = AP_INTR_DISABLED;
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_BUSY:
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_reset_wait(): Test queue for completion of the reset operation
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- unsigned long info;
-
- if (ap_dev->queue_count > 0 && ap_dev->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(ap_dev);
- else
- /* Get the status with TAPQ */
- status = ap_test_queue(ap_dev->qid, &info);
-
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_using_interrupts() &&
- ap_queue_enable_interruption(ap_dev,
- ap_airq.lsi_ptr) == 0)
- ap_dev->state = AP_STATE_SETIRQ_WAIT;
- else
- ap_dev->state = (ap_dev->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- return AP_WAIT_AGAIN;
- case AP_RESPONSE_BUSY:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/**
- * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
-{
- struct ap_queue_status status;
- unsigned long info;
-
- if (ap_dev->queue_count > 0 && ap_dev->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(ap_dev);
- else
- /* Get the status with TAPQ */
- status = ap_test_queue(ap_dev->qid, &info);
-
- if (status.int_enabled == 1) {
- /* Irqs are now enabled */
- ap_dev->interrupt = AP_INTR_ENABLED;
- ap_dev->state = (ap_dev->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- }
-
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fallthrough */
- case AP_RESPONSE_NO_PENDING_REPLY:
- return AP_WAIT_TIMEOUT;
- default:
- ap_dev->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
- }
-}
-
-/*
- * AP state machine jump table
- */
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
- [AP_STATE_RESET_START] = {
- [AP_EVENT_POLL] = ap_sm_reset,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_RESET_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_reset_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_SETIRQ_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_setirq_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_IDLE] = {
- [AP_EVENT_POLL] = ap_sm_write,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_WORKING] = {
- [AP_EVENT_POLL] = ap_sm_read_write,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
- },
- [AP_STATE_QUEUE_FULL] = {
- [AP_EVENT_POLL] = ap_sm_read,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
- },
- [AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_suspend_read,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_BORKED] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
-};
-
-static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
- enum ap_event event)
-{
- return ap_jumptable[ap_dev->state][event](ap_dev);
-}
-
-static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
- enum ap_event event)
-{
- enum ap_wait wait;
-
- while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
- ;
- return wait;
-}
-
/**
* ap_request_timeout(): Handling of request timeouts
* @data: Holds the AP device.
*
* Handles request timeouts.
*/
-static void ap_request_timeout(unsigned long data)
+void ap_request_timeout(unsigned long data)
{
- struct ap_device *ap_dev = (struct ap_device *) data;
+ struct ap_queue *aq = (struct ap_queue *) data;
if (ap_suspend_flag)
return;
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT));
- spin_unlock_bh(&ap_dev->lock);
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+ spin_unlock_bh(&aq->lock);
}
/**
@@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq)
*/
static void ap_tasklet_fn(unsigned long dummy)
{
- struct ap_device *ap_dev;
+ struct ap_card *ac;
+ struct ap_queue *aq;
enum ap_wait wait = AP_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
@@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy)
if (ap_using_interrupts())
xchg(ap_airq.lsi_ptr, 0);
- spin_lock(&ap_device_list_lock);
- list_for_each_entry(ap_dev, &ap_device_list, list) {
- spin_lock_bh(&ap_dev->lock);
- wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_card(ac) {
+ for_each_ap_queue(aq, ac) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ }
}
- spin_unlock(&ap_device_list_lock);
- ap_sm_wait(wait);
+ spin_unlock_bh(&ap_list_lock);
+
+ ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+ struct ap_card *ac;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_card(ac) {
+ for_each_ap_queue(aq, ac) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_list_lock);
+ return 1;
+ }
+ }
+ spin_unlock_bh(&ap_list_lock);
+ return 0;
}
/**
@@ -976,8 +455,7 @@ static int ap_poll_thread(void *data)
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (ap_suspend_flag ||
- atomic_read(&ap_poll_requests) <= 0) {
+ if (ap_suspend_flag || !ap_pending_requests()) {
schedule();
try_to_freeze();
}
@@ -989,7 +467,8 @@ static int ap_poll_thread(void *data)
continue;
}
ap_tasklet_fn(0);
- } while (!kthread_should_stop());
+ }
+
return 0;
}
@@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void)
mutex_unlock(&ap_poll_thread_mutex);
}
-/**
- * ap_queue_message(): Queue a request to an AP device.
- * @ap_dev: The AP device to queue the message to
- * @ap_msg: The message that is to be added
- */
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
- /* For asynchronous message handling a valid receive-callback
- * is required. */
- BUG_ON(!ap_msg->receive);
-
- spin_lock_bh(&ap_dev->lock);
- /* Queue the message. */
- list_add_tail(&ap_msg->list, &ap_dev->requestq);
- ap_dev->requestq_count++;
- ap_dev->total_request_count++;
- /* Send/receive as many request from the queue as possible. */
- ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_queue_message);
-
-/**
- * ap_cancel_message(): Cancel a crypto request.
- * @ap_dev: The AP device that has the message queued
- * @ap_msg: The message that is to be removed
- *
- * Cancel a crypto request. This is done by removing the request
- * from the device pending or request queue. Note that the
- * request stays on the AP queue. When it finishes the message
- * reply will be discarded because the psmid can't be found.
- */
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
- struct ap_message *tmp;
-
- spin_lock_bh(&ap_dev->lock);
- if (!list_empty(&ap_msg->list)) {
- list_for_each_entry(tmp, &ap_dev->pendingq, list)
- if (tmp->psmid == ap_msg->psmid) {
- ap_dev->pendingq_count--;
- goto found;
- }
- ap_dev->requestq_count--;
-found:
- list_del_init(&ap_msg->list);
- }
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_cancel_message);
-
-/*
- * AP device related attributes.
- */
-static ssize_t ap_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
-}
-
-static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
-
-static ssize_t ap_raw_hwtype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
-}
-
-static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
-
-static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
-}
-
-static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
-static ssize_t ap_request_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
-
-static ssize_t ap_requestq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
-
-static ssize_t ap_pendingq_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc;
-
- spin_lock_bh(&ap_dev->lock);
- rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
-
-static ssize_t ap_reset_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc = 0;
-
- spin_lock_bh(&ap_dev->lock);
- switch (ap_dev->state) {
- case AP_STATE_RESET_START:
- case AP_STATE_RESET_WAIT:
- rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
- break;
- case AP_STATE_WORKING:
- case AP_STATE_QUEUE_FULL:
- rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
- break;
- default:
- rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
- }
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
-
-static ssize_t ap_interrupt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int rc = 0;
-
- spin_lock_bh(&ap_dev->lock);
- if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
- rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
- else if (ap_dev->interrupt == AP_INTR_ENABLED)
- rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
- else
- rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
- spin_unlock_bh(&ap_dev->lock);
- return rc;
-}
-
-static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
-
-static ssize_t ap_modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
-}
-
-static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
-
-static ssize_t ap_functions_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
- return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
-}
-
-static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
-
-static struct attribute *ap_dev_attrs[] = {
- &dev_attr_hwtype.attr,
- &dev_attr_raw_hwtype.attr,
- &dev_attr_depth.attr,
- &dev_attr_request_count.attr,
- &dev_attr_requestq_count.attr,
- &dev_attr_pendingq_count.attr,
- &dev_attr_reset.attr,
- &dev_attr_interrupt.attr,
- &dev_attr_modalias.attr,
- &dev_attr_ap_functions.attr,
- NULL
-};
-static struct attribute_group ap_dev_attr_group = {
- .attrs = ap_dev_attrs
-};
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
/**
* ap_bus_match()
@@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = {
*/
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
- struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
@@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* supported types of the device_driver.
*/
for (id = ap_drv->ids; id->match_flags; id++) {
- if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
- (id->dev_type != ap_dev->device_type))
- continue;
- return 1;
+ if (is_card_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
+ if (is_queue_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
}
return 0;
}
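
With ap_bus_match() now distinguishing card and queue devices, a driver that wants to bind to both kinds lists each match flag separately in its ID table. A hypothetical table is sketched below; it assumes the AP_DEVICE_ID_MATCH_CARD_TYPE/AP_DEVICE_ID_MATCH_QUEUE_TYPE flags referenced above and an AP_DEVICE_TYPE_CEX3A hardware type, while the real tables live in the zcrypt_* drivers, which are not part of this hunk.

/* hypothetical ID table, illustrative only */
static struct ap_device_id example_ap_ids[] = {
	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, example_ap_ids);
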
@@ -1277,18 +560,24 @@ static int ap_dev_suspend(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- /* Poll on the device until all requests are finished. */
- spin_lock_bh(&ap_dev->lock);
- ap_dev->state = AP_STATE_SUSPEND_WAIT;
- while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE)
- ;
- ap_dev->state = AP_STATE_BORKED;
- spin_unlock_bh(&ap_dev->lock);
+ if (ap_dev->drv && ap_dev->drv->suspend)
+ ap_dev->drv->suspend(ap_dev);
+ return 0;
+}
+
+static int ap_dev_resume(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_dev->drv && ap_dev->drv->resume)
+ ap_dev->drv->resume(ap_dev);
return 0;
}
static void ap_bus_suspend(void)
{
+ AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");
+
ap_suspend_flag = 1;
/*
* Disable scanning for devices, thus we do not want to scan
@@ -1298,9 +587,25 @@ static void ap_bus_suspend(void)
tasklet_disable(&ap_tasklet);
}
-static int __ap_devices_unregister(struct device *dev, void *dummy)
+static int __ap_card_devices_unregister(struct device *dev, void *dummy)
+{
+ if (is_card_dev(dev))
+ device_unregister(dev);
+ return 0;
+}
+
+static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
{
- device_unregister(dev);
+ if (is_queue_dev(dev))
+ device_unregister(dev);
+ return 0;
+}
+
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+{
+ if (is_queue_dev(dev) &&
+ AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+ device_unregister(dev);
return 0;
}
@@ -1308,8 +613,15 @@ static void ap_bus_resume(void)
{
int rc;
- /* Unconditionally remove all AP devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+ AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");
+
+ /* remove all queue devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_queue_devices_unregister);
+ /* remove all card devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_card_devices_unregister);
+
/* Reset thin interrupt setting */
if (ap_interrupts_available() && !ap_using_interrupts()) {
rc = register_adapter_interrupt(&ap_airq);
@@ -1351,7 +663,7 @@ static struct notifier_block ap_power_notifier = {
.notifier_call = ap_power_event,
};
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL);
+static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
static struct bus_type ap_bus_type = {
.name = "ap",
@@ -1360,17 +672,6 @@ static struct bus_type ap_bus_type = {
.pm = &ap_bus_pm_ops,
};
-void ap_device_init_reply(struct ap_device *ap_dev,
- struct ap_message *reply)
-{
- ap_dev->reply = reply;
-
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_device_init_reply);
-
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1384,61 +685,22 @@ static int ap_device_probe(struct device *dev)
return rc;
}
-/**
- * __ap_flush_queue(): Flush requests.
- * @ap_dev: Pointer to the AP device
- *
- * Flush all requests from the request/pending queue of an AP device.
- */
-static void __ap_flush_queue(struct ap_device *ap_dev)
-{
- struct ap_message *ap_msg, *next;
-
- list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
- list_del_init(&ap_msg->list);
- ap_dev->pendingq_count--;
- ap_msg->rc = -EAGAIN;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- }
- list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
- list_del_init(&ap_msg->list);
- ap_dev->requestq_count--;
- ap_msg->rc = -EAGAIN;
- ap_msg->receive(ap_dev, ap_msg, NULL);
- }
-}
-
-void ap_flush_queue(struct ap_device *ap_dev)
-{
- spin_lock_bh(&ap_dev->lock);
- __ap_flush_queue(ap_dev);
- spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_flush_queue);
-
static int ap_device_remove(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
- ap_flush_queue(ap_dev);
- del_timer_sync(&ap_dev->timeout);
- spin_lock_bh(&ap_device_list_lock);
- list_del_init(&ap_dev->list);
- spin_unlock_bh(&ap_device_list_lock);
+ spin_lock_bh(&ap_list_lock);
+ if (is_card_dev(dev))
+ list_del_init(&to_ap_card(dev)->list);
+ else
+ list_del_init(&to_ap_queue(dev)->list);
+ spin_unlock_bh(&ap_list_lock);
if (ap_drv->remove)
ap_drv->remove(ap_dev);
- spin_lock_bh(&ap_dev->lock);
- atomic_sub(ap_dev->queue_count, &ap_poll_requests);
- spin_unlock_bh(&ap_dev->lock);
return 0;
}
-static void ap_device_release(struct device *dev)
-{
- kfree(to_ap_dev(dev));
-}
-
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
@@ -1481,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}
-static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+static ssize_t ap_domain_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int domain;
+
+ if (sscanf(buf, "%i\n", &domain) != 1 ||
+ domain < 0 || domain > ap_max_domain_id)
+ return -EINVAL;
+ spin_lock_bh(&ap_domain_lock);
+ ap_domain_index = domain;
+ spin_unlock_bh(&ap_domain_lock);
+
+ AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
+
+ return count;
+}
+
+static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
return snprintf(buf, PAGE_SIZE, "not supported\n");
- if (!test_facility(76))
- /* format 0 - 16 bit domain field */
- return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
- ap_configuration->adm[0],
- ap_configuration->adm[1]);
- /* format 1 - 256 bit domain field */
+
return snprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_configuration->adm[0], ap_configuration->adm[1],
@@ -1504,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
static BUS_ATTR(ap_control_domain_mask, 0444,
ap_control_domain_mask_show, NULL);
+static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_configuration) /* QCI not supported */
+ return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return snprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->aqm[0], ap_configuration->aqm[1],
+ ap_configuration->aqm[2], ap_configuration->aqm[3],
+ ap_configuration->aqm[4], ap_configuration->aqm[5],
+ ap_configuration->aqm[6], ap_configuration->aqm[7]);
+}
+
+static BUS_ATTR(ap_usage_domain_mask, 0444,
+ ap_usage_domain_mask_show, NULL);
+
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1570,7 +860,7 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
time > 120000000000ULL)
return -EINVAL;
poll_timeout = time;
- hr_time = ktime_set(0, poll_timeout);
+ hr_time = poll_timeout;
spin_lock_bh(&ap_poll_timer_lock);
hrtimer_cancel(&ap_poll_timer);
@@ -1599,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
+ &bus_attr_ap_usage_domain_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
@@ -1623,9 +914,12 @@ static int ap_select_domain(void)
* the "domain=" parameter or the domain with the maximum number
* of devices.
*/
- if (ap_domain_index >= 0)
+ spin_lock_bh(&ap_domain_lock);
+ if (ap_domain_index >= 0) {
/* Domain has already been selected. */
+ spin_unlock_bh(&ap_domain_lock);
return 0;
+ }
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
@@ -1647,109 +941,171 @@ static int ap_select_domain(void)
}
if (best_domain >= 0){
ap_domain_index = best_domain;
+ spin_unlock_bh(&ap_domain_lock);
return 0;
}
+ spin_unlock_bh(&ap_domain_lock);
return -ENODEV;
}
-/**
- * __ap_scan_bus(): Scan the AP bus.
- * @dev: Pointer to device
- * @data: Pointer to data
- *
- * Scan the AP bus for new devices.
+/*
+ * helper function to be used with bus_find_device()
+ * matches for the card device with the given id
*/
-static int __ap_scan_bus(struct device *dev, void *data)
+static int __match_card_device_with_id(struct device *dev, void *data)
{
- return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
}
+/* helper function to be used with bus_find_device()
+ * matches for the queue device with a given qid
+ */
+static int __match_queue_device_with_qid(struct device *dev, void *data)
+{
+ return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Runs periodically, triggered by the ap_config_timer every ap_config_time seconds.
+ */
static void ap_scan_bus(struct work_struct *unused)
{
- struct ap_device *ap_dev;
+ struct ap_queue *aq;
+ struct ap_card *ac;
struct device *dev;
ap_qid_t qid;
- int queue_depth = 0, device_type = 0;
- unsigned int device_functions = 0;
- int rc, i, borked;
+ int depth = 0, type = 0;
+ unsigned int functions = 0;
+ int rc, id, dom, borked, domains;
+
+ AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
ap_query_configuration();
if (ap_select_domain() != 0)
goto out;
- for (i = 0; i < AP_DEVICES; i++) {
- qid = AP_MKQID(i, ap_domain_index);
+ for (id = 0; id < AP_DEVICES; id++) {
+ /* check if device is registered */
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(unsigned long)qid,
- __ap_scan_bus);
- rc = ap_query_queue(qid, &queue_depth, &device_type,
- &device_functions);
- if (dev) {
- ap_dev = to_ap_dev(dev);
- spin_lock_bh(&ap_dev->lock);
- if (rc == -ENODEV)
- ap_dev->state = AP_STATE_BORKED;
- borked = ap_dev->state == AP_STATE_BORKED;
- spin_unlock_bh(&ap_dev->lock);
- if (borked) /* Remove broken device */
+ (void *)(long) id,
+ __match_card_device_with_id);
+ ac = dev ? to_ap_card(dev) : NULL;
+ if (!ap_test_config_card_id(id)) {
+ if (dev) {
+ /* Card device has been removed from
+ * configuration, remove the associated
+ * queue devices.
+ */
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) id,
+ __ap_queue_devices_with_id_unregister);
+ /* now remove the card device */
device_unregister(dev);
- put_device(dev);
- if (!borked)
- continue;
- }
- if (rc)
- continue;
- ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
- if (!ap_dev)
- break;
- ap_dev->qid = qid;
- ap_dev->state = AP_STATE_RESET_START;
- ap_dev->interrupt = AP_INTR_DISABLED;
- ap_dev->queue_depth = queue_depth;
- ap_dev->raw_hwtype = device_type;
- ap_dev->device_type = device_type;
- ap_dev->functions = device_functions;
- spin_lock_init(&ap_dev->lock);
- INIT_LIST_HEAD(&ap_dev->pendingq);
- INIT_LIST_HEAD(&ap_dev->requestq);
- INIT_LIST_HEAD(&ap_dev->list);
- setup_timer(&ap_dev->timeout, ap_request_timeout,
- (unsigned long) ap_dev);
-
- ap_dev->device.bus = &ap_bus_type;
- ap_dev->device.parent = ap_root_device;
- rc = dev_set_name(&ap_dev->device, "card%02x",
- AP_QID_DEVICE(ap_dev->qid));
- if (rc) {
- kfree(ap_dev);
- continue;
- }
- /* Add to list of devices */
- spin_lock_bh(&ap_device_list_lock);
- list_add(&ap_dev->list, &ap_device_list);
- spin_unlock_bh(&ap_device_list_lock);
- /* Start with a device reset */
- spin_lock_bh(&ap_dev->lock);
- ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
- spin_unlock_bh(&ap_dev->lock);
- /* Register device */
- ap_dev->device.release = ap_device_release;
- rc = device_register(&ap_dev->device);
- if (rc) {
- spin_lock_bh(&ap_dev->lock);
- list_del_init(&ap_dev->list);
- spin_unlock_bh(&ap_dev->lock);
- put_device(&ap_dev->device);
+ put_device(dev);
+ }
continue;
}
- /* Add device attributes. */
- rc = sysfs_create_group(&ap_dev->device.kobj,
- &ap_dev_attr_group);
- if (rc) {
- device_unregister(&ap_dev->device);
- continue;
+ /* According to the configuration there should be a card
+ * device, so check if there is at least one valid queue
+ * and maybe create queue devices and the card device.
+ */
+ domains = 0;
+ for (dom = 0; dom < AP_DOMAINS; dom++) {
+ qid = AP_MKQID(id, dom);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_domain(dom)) {
+ if (dev) {
+ /* Queue device exists but has been
+ * removed from configuration.
+ */
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ rc = ap_query_queue(qid, &depth, &type, &functions);
+ if (dev) {
+ spin_lock_bh(&aq->lock);
+ if (rc == -ENODEV ||
+ /* adapter reconfiguration */
+ (ac && ac->functions != functions))
+ aq->state = AP_STATE_BORKED;
+ borked = aq->state == AP_STATE_BORKED;
+ spin_unlock_bh(&aq->lock);
+ if (borked) /* Remove broken device */
+ device_unregister(dev);
+ put_device(dev);
+ if (!borked) {
+ domains++;
+ continue;
+ }
+ }
+ if (rc)
+ continue;
+ /* new queue device needed */
+ if (!ac) {
+ /* but first create the card device */
+ ac = ap_card_create(id, depth,
+ type, functions);
+ if (!ac)
+ continue;
+ ac->ap_dev.device.bus = &ap_bus_type;
+ ac->ap_dev.device.parent = ap_root_device;
+ dev_set_name(&ac->ap_dev.device,
+ "card%02x", id);
+ /* Register card with AP bus */
+ rc = device_register(&ac->ap_dev.device);
+ if (rc) {
+ put_device(&ac->ap_dev.device);
+ ac = NULL;
+ break;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(&ac->ap_dev.device);
+ /* Add card device to card list */
+ spin_lock_bh(&ap_list_lock);
+ list_add(&ac->list, &ap_card_list);
+ spin_unlock_bh(&ap_list_lock);
+ }
+ /* now create the new queue device */
+ aq = ap_queue_create(qid, type);
+ if (!aq)
+ continue;
+ aq->card = ac;
+ aq->ap_dev.device.bus = &ap_bus_type;
+ aq->ap_dev.device.parent = &ac->ap_dev.device;
+ dev_set_name(&aq->ap_dev.device,
+ "%02x.%04x", id, dom);
+ /* Add queue device to card queue list */
+ spin_lock_bh(&ap_list_lock);
+ list_add(&aq->list, &ac->queues);
+ spin_unlock_bh(&ap_list_lock);
+ /* Start with a device reset */
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ /* Register device */
+ rc = device_register(&aq->ap_dev.device);
+ if (rc) {
+ spin_lock_bh(&ap_list_lock);
+ list_del_init(&aq->list);
+ spin_unlock_bh(&ap_list_lock);
+ put_device(&aq->ap_dev.device);
+ continue;
+ }
+ domains++;
+ } /* end domain loop */
+ if (ac) {
+ /* remove card dev if there are no queue devices */
+ if (!domains)
+ device_unregister(&ac->ap_dev.device);
+ put_device(&ac->ap_dev.device);
}
- }
+ } /* end device loop */
out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
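For orientation only: ap_scan_bus() is driven by ap_config_timer, which it re-arms at the end of every pass (the mod_timer() call above). The fragment below sketches that generic timer-plus-work pattern with made-up names (my_timer, my_scan); it is not the ap_bus implementation.

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;
static struct work_struct my_work;
static int my_interval = 30;			/* seconds, like ap_config_time */

static void my_scan(struct work_struct *unused)
{
	/* ... scan the bus in process context ... */

	/* Re-arm the timer so the scan keeps running periodically. */
	mod_timer(&my_timer, jiffies + my_interval * HZ);
}

static void my_timer_fn(unsigned long unused)
{
	/* Timer (softirq) context: defer the heavy lifting to a workqueue. */
	queue_work(system_long_wq, &my_work);
}

/* Somewhere in module init:
 *	INIT_WORK(&my_work, my_scan);
 *	setup_timer(&my_timer, my_timer_fn, 0);
 *	mod_timer(&my_timer, jiffies + my_interval * HZ);
 */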
@@ -1768,7 +1124,7 @@ static void ap_reset_domain(void)
if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
return;
for (i = 0; i < AP_DEVICES; i++)
- ap_reset_queue(AP_MKQID(i, ap_domain_index));
+ ap_rapq(AP_MKQID(i, ap_domain_index));
}
static void ap_reset_all(void)
@@ -1781,7 +1137,7 @@ static void ap_reset_all(void)
for (j = 0; j < AP_DEVICES; j++) {
if (!ap_test_config_card_id(j))
continue;
- ap_reset_queue(AP_MKQID(j, i));
+ ap_rapq(AP_MKQID(j, i));
}
}
}
@@ -1790,6 +1146,23 @@ static struct reset_call ap_reset_call = {
.fn = ap_reset_all,
};
+int __init ap_debug_init(void)
+{
+ ap_dbf_root = debugfs_create_dir("ap", NULL);
+ ap_dbf_info = debug_register("ap", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(ap_dbf_info, &debug_sprintf_view);
+ debug_set_level(ap_dbf_info, DBF_ERR);
+
+ return 0;
+}
+
+void ap_debug_exit(void)
+{
+ debugfs_remove(ap_dbf_root);
+ debug_unregister(ap_dbf_info);
+}
+
/**
* ap_module_init(): The module initialization code.
*
@@ -1800,6 +1173,10 @@ int __init ap_module_init(void)
int max_domain_id;
int rc, i;
+ rc = ap_debug_init();
+ if (rc)
+ return rc;
+
if (ap_instructions_available() != 0) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
@@ -1909,7 +1286,15 @@ void ap_module_exit(void)
del_timer_sync(&ap_config_timer);
hrtimer_cancel(&ap_poll_timer);
tasklet_kill(&ap_tasklet);
- bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+
+ /* first remove queue devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_queue_devices_unregister);
+ /* now remove the card devices */
+ bus_for_each_dev(&ap_bus_type, NULL, NULL,
+ __ap_card_devices_unregister);
+
+ /* remove bus attributes */
for (i = 0; ap_bus_attrs[i]; i++)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
unregister_pm_notifier(&ap_power_notifier);
@@ -1919,6 +1304,8 @@ void ap_module_exit(void)
unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
+
+ ap_debug_exit();
}
module_init(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c024d7..4dc7c88fb054 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -27,7 +27,6 @@
#define _AP_BUS_H_
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
#include <linux/types.h>
#define AP_DEVICES 64 /* Number of AP devices. */
@@ -38,14 +37,17 @@
extern int ap_domain_index;
+extern spinlock_t ap_list_lock;
+extern struct list_head ap_card_list;
+
/**
* The ap_qid_t identifier of an ap queue. It contains a
- * 6 bit device index and a 4 bit queue index (domain).
+ * 6 bit card index and an 8 bit queue index (domain).
*/
typedef unsigned int ap_qid_t;
-#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
-#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
+#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
+#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid) ((_qid) & 255)
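A short worked example of the qid encoding defined by the macros above; restating the macros makes it build stand-alone, and the card/domain numbers are arbitrary.

#include <assert.h>

#define AP_MKQID(_card, _queue)	(((_card) & 63) << 8 | ((_queue) & 255))
#define AP_QID_CARD(_qid)	(((_qid) >> 8) & 63)
#define AP_QID_QUEUE(_qid)	((_qid) & 255)

int main(void)
{
	/* Card 0x0a, domain 0x17 -> qid 0x0a17, i.e. sysfs name "0a.0017". */
	unsigned int qid = AP_MKQID(0x0a, 0x17);

	assert(qid == 0x0a17);
	assert(AP_QID_CARD(qid) == 0x0a);
	assert(AP_QID_QUEUE(qid) == 0x17);
	return 0;
}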
/**
@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
* @queue_full: Is 1 if the queue is full
* @pad: A 4 bit pad
* @int_enabled: Shows if interrupts are enabled for the AP
- * @response_conde: Holds the 8 bit response code
+ * @response_code: Holds the 8 bit response code
* @pad2: A 16 bit pad
*
* The ap queue status word is returned by all three AP functions
@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
+#define AP_DEVICE_TYPE_CEX6 12
/*
* Known function facilities
@@ -166,7 +169,8 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
- int request_timeout; /* request timeout in jiffies */
+ void (*suspend)(struct ap_device *);
+ void (*resume)(struct ap_device *);
};
#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -174,38 +178,51 @@ struct ap_driver {
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
-typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
-
struct ap_device {
struct device device;
struct ap_driver *drv; /* Pointer to AP device driver. */
- spinlock_t lock; /* Per device lock. */
- struct list_head list; /* private list of all AP devices. */
+ int device_type; /* AP device type. */
+};
- enum ap_state state; /* State of the AP device. */
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
- ap_qid_t qid; /* AP queue id. */
- int queue_depth; /* AP queue depth.*/
- int device_type; /* AP device type. */
+struct ap_card {
+ struct ap_device ap_dev;
+ struct list_head list; /* Private list of AP cards. */
+ struct list_head queues; /* List of assoc. AP queues */
+ void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
- struct timer_list timeout; /* Timer for request timeouts. */
+ int queue_depth; /* AP queue depth.*/
+ int id; /* AP card number. */
+ atomic_t total_request_count; /* # requests ever for this AP device.*/
+};
+
+#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+struct ap_queue {
+ struct ap_device ap_dev;
+ struct list_head list; /* Private list of AP queues. */
+ struct ap_card *card; /* Ptr to assoc. AP card. */
+ spinlock_t lock; /* Per device lock. */
+ void *private; /* ap driver private pointer. */
+ ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
-
- struct list_head pendingq; /* List of message sent to AP queue. */
+ enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
- struct list_head requestq; /* List of message yet to be sent. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device. */
-
+ int total_request_count; /* # requests ever for this AP device.*/
+ int request_timeout; /* Request timeout in jiffies. */
+ struct timer_list timeout; /* Timer for request timeouts. */
+ struct list_head pendingq; /* List of message sent to AP queue. */
+ struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
-
- void *private; /* ap driver private pointer. */
};
-#define to_ap_dev(x) container_of((x), struct ap_device, device)
+#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
+
+typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
struct list_head list; /* Request queueing. */
@@ -217,7 +234,7 @@ struct ap_message {
void *private; /* ap driver private pointer. */
unsigned int special:1; /* Used for special commands. */
/* receive is called from tasklet context */
- void (*receive)(struct ap_device *, struct ap_message *,
+ void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
@@ -232,10 +249,6 @@ struct ap_config_info {
unsigned char reserved4[16];
} __packed;
-#define AP_DEVICE(dt) \
- .dev_type=(dt), \
- .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
-
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
ap_msg->receive = NULL;
}
+#define for_each_ap_card(_ac) \
+ list_for_each_entry(_ac, &ap_card_list, list)
+
+#define for_each_ap_queue(_aq, _ac) \
+ list_for_each_entry(_aq, &(_ac)->queues, list)
+
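A minimal sketch of how these iterators are meant to be used, modeled on the sysfs counters added in ap_card.c below; it assumes kernel context with this header included.

/* Sketch: sum the pending requests of every queue on every card. */
static int ap_count_pending(void)
{
	struct ap_card *ac;
	struct ap_queue *aq;
	int count = 0;

	spin_lock_bh(&ap_list_lock);
	for_each_ap_card(ac)
		for_each_ap_queue(aq, ac)
			count += aq->pendingq_count;
	spin_unlock_bh(&ap_list_lock);
	return count;
}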
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_flush_queue(struct ap_device *ap_dev);
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_queue *aq);
+
+void *ap_airq_ptr(void);
+void ap_wait(enum ap_wait wait);
+void ap_request_timeout(unsigned long data);
void ap_bus_force_rescan(void);
-void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_remove(struct ap_queue *aq);
+void ap_queue_suspend(struct ap_device *ap_dev);
+void ap_queue_resume(struct ap_device *ap_dev);
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+ unsigned int device_functions);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 000000000000..0110d44172a3
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, card related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/*
+ * AP card related attributes.
+ */
+static ssize_t ap_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+}
+
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+
+static ssize_t ap_raw_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+}
+
+static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
+
+static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+}
+
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+}
+
+static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
+
+static ssize_t ap_request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ unsigned int req_cnt;
+
+ req_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ req_cnt = atomic_read(&ac->total_request_count);
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ap_queue *aq;
+ unsigned int reqq_cnt;
+
+ reqq_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_queue(aq, ac)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ap_queue *aq;
+ unsigned int penq_cnt;
+
+ penq_cnt = 0;
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_queue(aq, ac)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_list_lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+
+static struct attribute *ap_card_dev_attrs[] = {
+ &dev_attr_hwtype.attr,
+ &dev_attr_raw_hwtype.attr,
+ &dev_attr_depth.attr,
+ &dev_attr_ap_functions.attr,
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_modalias.attr,
+ NULL
+};
+
+static struct attribute_group ap_card_dev_attr_group = {
+ .attrs = ap_card_dev_attrs
+};
+
+static const struct attribute_group *ap_card_dev_attr_groups[] = {
+ &ap_card_dev_attr_group,
+ NULL
+};
+
+struct device_type ap_card_type = {
+ .name = "ap_card",
+ .groups = ap_card_dev_attr_groups,
+};
+
+static void ap_card_device_release(struct device *dev)
+{
+ kfree(to_ap_card(dev));
+}
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+ unsigned int functions)
+{
+ struct ap_card *ac;
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return NULL;
+ INIT_LIST_HEAD(&ac->queues);
+ ac->ap_dev.device.release = ap_card_device_release;
+ ac->ap_dev.device.type = &ap_card_type;
+ ac->ap_dev.device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+ ac->raw_hwtype = device_type;
+ ac->queue_depth = queue_depth;
+ ac->functions = functions;
+ ac->id = id;
+ return ac;
+}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 000000000000..78dbff842dae
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef AP_DEBUG_H
+#define AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define AP_DBF(...) \
+ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *ap_dbf_info;
+
+int ap_debug_init(void);
+void ap_debug_exit(void);
+
+#endif /* AP_DEBUG_H */
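A hedged usage sketch for the new debug feature: ap_debug_init() must have registered ap_dbf_info first, and entries typically appear under the s390dbf debugfs directory once the level is raised above the default DBF_ERR. The helper name is made up.

#include "ap_debug.h"

static void ap_dbf_example(int domain, int rc)
{
	/* First argument is the level; at most DBF_MAX_SPRINTF_ARGS
	 * format arguments may follow the format string. */
	AP_DBF(DBF_INFO, "example domain=%d rc=%d\n", domain, rc);
	if (rc)
		AP_DBF(DBF_WARN, "example failed, rc=%d\n", rc);
}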
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 000000000000..b58a917dc510
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, queue related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * @aq: The AP queue
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on AP queue via ap_aqic(). Based on the return
+ * value it waits a while and tests the AP queue if interrupts
+ * have been switched on using ap_test_queue().
+ */
+static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+{
+ struct ap_queue_status status;
+
+ status = ap_aqic(aq->qid, ind);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return 0;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ return -EOPNOTSUPP;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ default:
+ return -EBUSY;
+ }
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+ unsigned int special)
+{
+ if (special == 1)
+ qid |= 0x400000UL;
+ return ap_nqap(qid, psmid, msg, length);
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ status = __ap_send(qid, psmid, msg, length, 0);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_Q_FULL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return -EINVAL;
+ default: /* Device is gone. */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_send);
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ if (msg == NULL)
+ return -EINVAL;
+ status = ap_dqap(qid, psmid, msg, length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (status.queue_empty)
+ return -ENOENT;
+ return -EBUSY;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_recv);
+
+/* State machine definitions and helpers */
+
+static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+{
+ return AP_WAIT_NONE;
+}
+
+/**
+ * ap_sm_recv(): Receive pending reply messages from an AP queue but do
+ * not change the state of the device.
+ * @aq: pointer to the AP queue
+ *
+ * Returns the ap queue status returned by the dequeue (DQAP) operation.
+ */
+static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ status = ap_dqap(aq->qid, &aq->reply->psmid,
+ aq->reply->message, aq->reply->length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count--;
+ if (aq->queue_count > 0)
+ mod_timer(&aq->timeout,
+ jiffies + aq->request_timeout);
+ list_for_each_entry(ap_msg, &aq->pendingq, list) {
+ if (ap_msg->psmid != aq->reply->psmid)
+ continue;
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->receive(aq, ap_msg, aq->reply);
+ break;
+ }
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (!status.queue_empty || aq->queue_count <= 0)
+ break;
+ /* The card shouldn't forget requests but who knows. */
+ aq->queue_count = 0;
+ list_splice_init(&aq->pendingq, &aq->requestq);
+ aq->requestq_count += aq->pendingq_count;
+ aq->pendingq_count = 0;
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+/**
+ * ap_sm_read(): Receive pending reply messages from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (!aq->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(aq);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0) {
+ aq->state = AP_STATE_WORKING;
+ return AP_WAIT_AGAIN;
+ }
+ aq->state = AP_STATE_IDLE;
+ return AP_WAIT_NONE;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (aq->queue_count > 0)
+ return AP_WAIT_INTERRUPT;
+ aq->state = AP_STATE_IDLE;
+ return AP_WAIT_NONE;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (!aq->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(aq);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0)
+ return AP_WAIT_AGAIN;
+ /* fall through */
+ default:
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_write(): Send messages from the request queue to an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_write(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ if (aq->requestq_count <= 0)
+ return AP_WAIT_NONE;
+ /* Start the next request on the queue. */
+ ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+ status = __ap_send(aq->qid, ap_msg->psmid,
+ ap_msg->message, ap_msg->length, ap_msg->special);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count++;
+ if (aq->queue_count == 1)
+ mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+ list_move_tail(&ap_msg->list, &aq->pendingq);
+ aq->requestq_count--;
+ aq->pendingq_count++;
+ if (aq->queue_count < aq->card->queue_depth) {
+ aq->state = AP_STATE_WORKING;
+ return AP_WAIT_AGAIN;
+ }
+ /* fall through */
+ case AP_RESPONSE_Q_FULL:
+ aq->state = AP_STATE_QUEUE_FULL;
+ return AP_WAIT_INTERRUPT;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->state = AP_STATE_RESET_WAIT;
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EINVAL;
+ ap_msg->receive(aq, ap_msg, NULL);
+ return AP_WAIT_AGAIN;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_read_write(): Send and receive messages to/from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+{
+ return min(ap_sm_read(aq), ap_sm_write(aq));
+}
+
+/**
+ * ap_sm_reset(): Reset an AP queue.
+ * @aq: The AP queue
+ *
+ * Submit the Reset command to an AP queue.
+ */
+static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ status = ap_rapq(aq->qid);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->state = AP_STATE_RESET_WAIT;
+ aq->interrupt = AP_INTR_DISABLED;
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_BUSY:
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_reset_wait(): Test queue for completion of the reset operation
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
+ */
+static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ void *lsi_ptr;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ lsi_ptr = ap_airq_ptr();
+ if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+ aq->state = AP_STATE_SETIRQ_WAIT;
+ else
+ aq->state = (aq->queue_count > 0) ?
+ AP_STATE_WORKING : AP_STATE_IDLE;
+ return AP_WAIT_AGAIN;
+ case AP_RESPONSE_BUSY:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return AP_WAIT_TIMEOUT;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
+ */
+static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ if (status.int_enabled == 1) {
+ /* Irqs are now enabled */
+ aq->interrupt = AP_INTR_ENABLED;
+ aq->state = (aq->queue_count > 0) ?
+ AP_STATE_WORKING : AP_STATE_IDLE;
+ }
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0)
+ return AP_WAIT_AGAIN;
+ /* fallthrough */
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ return AP_WAIT_TIMEOUT;
+ default:
+ aq->state = AP_STATE_BORKED;
+ return AP_WAIT_NONE;
+ }
+}
+
+/*
+ * AP state machine jump table
+ */
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+ [AP_STATE_RESET_START] = {
+ [AP_EVENT_POLL] = ap_sm_reset,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_RESET_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_reset_wait,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_SETIRQ_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_setirq_wait,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_IDLE] = {
+ [AP_EVENT_POLL] = ap_sm_write,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_WORKING] = {
+ [AP_EVENT_POLL] = ap_sm_read_write,
+ [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+ [AP_STATE_QUEUE_FULL] = {
+ [AP_EVENT_POLL] = ap_sm_read,
+ [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+ [AP_STATE_SUSPEND_WAIT] = {
+ [AP_EVENT_POLL] = ap_sm_suspend_read,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_STATE_BORKED] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+};
+
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+{
+ return ap_jumptable[aq->state][event](aq);
+}
+
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+{
+ enum ap_wait wait;
+
+ while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+ ;
+ return wait;
+}
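How callers are expected to drive the jump table, mirroring ap_queue_message() and ap_queue_init_reply() further down: hold the queue lock, feed an event, and pass the returned wait hint to ap_wait(). The function name is illustrative.

/* Sketch: poll one queue and arm the appropriate wait afterwards. */
static void ap_poll_one(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* Loop until the state machine stops returning AP_WAIT_AGAIN,
	 * then let ap_wait() set up the poll timer or interrupt wait. */
	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}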
+
+/*
+ * Power management for queue devices
+ */
+void ap_queue_suspend(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+ /* Poll on the device until all requests are finished. */
+ spin_lock_bh(&aq->lock);
+ aq->state = AP_STATE_SUSPEND_WAIT;
+ while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
+ ;
+ aq->state = AP_STATE_BORKED;
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_suspend);
+
+void ap_queue_resume(struct ap_device *ap_dev)
+{
+}
+EXPORT_SYMBOL(ap_queue_resume);
+
+/*
+ * AP queue related attributes.
+ */
+static ssize_t ap_request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int req_cnt;
+
+ spin_lock_bh(&aq->lock);
+ req_cnt = aq->total_request_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int reqq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ reqq_cnt = aq->requestq_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int penq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ penq_cnt = aq->pendingq_count;
+ spin_unlock_bh(&aq->lock);
+ return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ switch (aq->state) {
+ case AP_STATE_RESET_START:
+ case AP_STATE_RESET_WAIT:
+ rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ break;
+ case AP_STATE_WORKING:
+ case AP_STATE_QUEUE_FULL:
+ rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ break;
+ default:
+ rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ }
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
+
+static ssize_t ap_interrupt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->state == AP_STATE_SETIRQ_WAIT)
+ rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ else if (aq->interrupt == AP_INTR_ENABLED)
+ rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ else
+ rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
+
+static struct attribute *ap_queue_dev_attrs[] = {
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_interrupt.attr,
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_attr_group = {
+ .attrs = ap_queue_dev_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_attr_groups[] = {
+ &ap_queue_dev_attr_group,
+ NULL
+};
+
+struct device_type ap_queue_type = {
+ .name = "ap_queue",
+ .groups = ap_queue_dev_attr_groups,
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+ kfree(to_ap_queue(dev));
+}
+
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+{
+ struct ap_queue *aq;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return NULL;
+ aq->ap_dev.device.release = ap_queue_device_release;
+ aq->ap_dev.device.type = &ap_queue_type;
+ aq->ap_dev.device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+ aq->qid = qid;
+ aq->state = AP_STATE_RESET_START;
+ aq->interrupt = AP_INTR_DISABLED;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->pendingq);
+ INIT_LIST_HEAD(&aq->requestq);
+ setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
+
+ return aq;
+}
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
+{
+ aq->reply = reply;
+
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_reply);
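A sketch of the driver side: before a queue can be used, its probe routine hands a per-queue reply buffer to ap_queue_init_reply(). The buffer size and the function name are placeholders, not taken from an actual zcrypt driver.

#include <linux/slab.h>
#include "ap_bus.h"

static int example_queue_probe(struct ap_queue *aq)
{
	struct ap_message *reply;

	reply = kzalloc(sizeof(*reply), GFP_KERNEL);
	if (!reply)
		return -ENOMEM;
	ap_init_message(reply);
	reply->length = 4096;			/* placeholder size */
	reply->message = kmalloc(reply->length, GFP_KERNEL);
	if (!reply->message) {
		kfree(reply);
		return -ENOMEM;
	}
	/* Hand the buffer to the queue and kick the state machine once. */
	ap_queue_init_reply(aq, reply);
	return 0;
}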
+
+/**
+ * ap_queue_message(): Queue a request to an AP device.
+ * @aq: The AP device to queue the message to
+ * @ap_msg: The message that is to be added
+ */
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ /* For asynchronous message handling a valid receive-callback
+ * is required.
+ */
+ BUG_ON(!ap_msg->receive);
+
+ spin_lock_bh(&aq->lock);
+ /* Queue the message. */
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+ atomic_inc(&aq->card->total_request_count);
+ /* Send/receive as many request from the queue as possible. */
+ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @aq: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ struct ap_message *tmp;
+
+ spin_lock_bh(&aq->lock);
+ if (!list_empty(&ap_msg->list)) {
+ list_for_each_entry(tmp, &aq->pendingq, list)
+ if (tmp->psmid == ap_msg->psmid) {
+ aq->pendingq_count--;
+ goto found;
+ }
+ aq->requestq_count--;
+found:
+ list_del_init(&ap_msg->list);
+ }
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @aq: Pointer to the AP queue
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_queue *aq)
+{
+ struct ap_message *ap_msg, *next;
+
+ list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+ list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+}
+
+void ap_flush_queue(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+void ap_queue_remove(struct ap_queue *aq)
+{
+ ap_flush_queue(aq);
+ del_timer_sync(&aq->timeout);
+}
+EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5d3d04c040c2..51eece9af577 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,15 +36,19 @@
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
-#include "zcrypt_debug.h"
+#define CREATE_TRACE_POINTS
+#include <asm/trace/zcrypt.h>
+
#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
/*
* Module description.
@@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+/*
+ * zcrypt tracepoint functions
+ */
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
+
static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
-static DEFINE_SPINLOCK(zcrypt_device_lock);
-static LIST_HEAD(zcrypt_device_list);
-static int zcrypt_device_count = 0;
+DEFINE_SPINLOCK(zcrypt_list_lock);
+LIST_HEAD(zcrypt_card_list);
+int zcrypt_device_count;
+
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);
-static int zcrypt_rng_device_add(void);
-static void zcrypt_rng_device_remove(void);
-
-static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
static LIST_HEAD(zcrypt_ops_list);
-static debug_info_t *zcrypt_dbf_common;
-static debug_info_t *zcrypt_dbf_devices;
-static struct dentry *debugfs_root;
-
-/*
- * Device attributes common for all crypto devices.
- */
-static ssize_t zcrypt_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
-}
-
-static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
-
-static ssize_t zcrypt_online_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
-}
-
-static ssize_t zcrypt_online_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct zcrypt_device *zdev = to_ap_dev(dev)->private;
- int online;
-
- if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
- return -EINVAL;
- zdev->online = online;
- ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
- zdev->online);
- if (!online)
- ap_flush_queue(zdev->ap_dev);
- return count;
-}
-
-static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
-
-static struct attribute * zcrypt_device_attrs[] = {
- &dev_attr_type.attr,
- &dev_attr_online.attr,
- NULL,
-};
-
-static struct attribute_group zcrypt_device_attr_group = {
- .attrs = zcrypt_device_attrs,
-};
+/* Zcrypt related debug feature stuff. */
+static struct dentry *zcrypt_dbf_root;
+debug_info_t *zcrypt_dbf_info;
/**
* Process a rescan of the transport layer.
@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void)
atomic_set(&zcrypt_rescan_req, 0);
atomic_inc(&zcrypt_rescan_count);
ap_bus_force_rescan();
- ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
- atomic_inc_return(&zcrypt_rescan_count));
+ ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
+ atomic_inc_return(&zcrypt_rescan_count));
return 1;
}
return 0;
}
-/**
- * __zcrypt_increase_preference(): Increase preference of a crypto device.
- * @zdev: Pointer the crypto device
- *
- * Move the device towards the head of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
-{
- struct zcrypt_device *tmp;
- struct list_head *l;
-
- if (zdev->speed_rating == 0)
- return;
- for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
- tmp = list_entry(l, struct zcrypt_device, list);
- if ((tmp->request_count + 1) * tmp->speed_rating <=
- (zdev->request_count + 1) * zdev->speed_rating &&
- tmp->speed_rating != 0)
- break;
- }
- if (l == zdev->list.prev)
- return;
- /* Move zdev behind l */
- list_move(&zdev->list, l);
-}
-
-/**
- * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
- * @zdev: Pointer to a crypto device.
- *
- * Move the device towards the tail of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
-{
- struct zcrypt_device *tmp;
- struct list_head *l;
-
- if (zdev->speed_rating == 0)
- return;
- for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
- tmp = list_entry(l, struct zcrypt_device, list);
- if ((tmp->request_count + 1) * tmp->speed_rating >
- (zdev->request_count + 1) * zdev->speed_rating ||
- tmp->speed_rating == 0)
- break;
- }
- if (l == zdev->list.next)
- return;
- /* Move zdev before l */
- list_move_tail(&zdev->list, l);
-}
-
-static void zcrypt_device_release(struct kref *kref)
-{
- struct zcrypt_device *zdev =
- container_of(kref, struct zcrypt_device, refcount);
- zcrypt_device_free(zdev);
-}
-
-void zcrypt_device_get(struct zcrypt_device *zdev)
-{
- kref_get(&zdev->refcount);
-}
-EXPORT_SYMBOL(zcrypt_device_get);
-
-int zcrypt_device_put(struct zcrypt_device *zdev)
-{
- return kref_put(&zdev->refcount, zcrypt_device_release);
-}
-EXPORT_SYMBOL(zcrypt_device_put);
-
-struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
-{
- struct zcrypt_device *zdev;
-
- zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
- if (!zdev)
- return NULL;
- zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
- if (!zdev->reply.message)
- goto out_free;
- zdev->reply.length = max_response_size;
- spin_lock_init(&zdev->lock);
- INIT_LIST_HEAD(&zdev->list);
- zdev->dbf_area = zcrypt_dbf_devices;
- return zdev;
-
-out_free:
- kfree(zdev);
- return NULL;
-}
-EXPORT_SYMBOL(zcrypt_device_alloc);
-
-void zcrypt_device_free(struct zcrypt_device *zdev)
-{
- kfree(zdev->reply.message);
- kfree(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_free);
-
-/**
- * zcrypt_device_register() - Register a crypto device.
- * @zdev: Pointer to a crypto device
- *
- * Register a crypto device. Returns 0 if successful.
- */
-int zcrypt_device_register(struct zcrypt_device *zdev)
-{
- int rc;
-
- if (!zdev->ops)
- return -ENODEV;
- rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- if (rc)
- goto out;
- get_device(&zdev->ap_dev->device);
- kref_init(&zdev->refcount);
- spin_lock_bh(&zcrypt_device_lock);
- zdev->online = 1; /* New devices are online by default. */
- ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
- zdev->online);
- list_add_tail(&zdev->list, &zcrypt_device_list);
- __zcrypt_increase_preference(zdev);
- zcrypt_device_count++;
- spin_unlock_bh(&zcrypt_device_lock);
- if (zdev->ops->rng) {
- rc = zcrypt_rng_device_add();
- if (rc)
- goto out_unregister;
- }
- return 0;
-
-out_unregister:
- spin_lock_bh(&zcrypt_device_lock);
- zcrypt_device_count--;
- list_del_init(&zdev->list);
- spin_unlock_bh(&zcrypt_device_lock);
- sysfs_remove_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
-out:
- return rc;
-}
-EXPORT_SYMBOL(zcrypt_device_register);
-
-/**
- * zcrypt_device_unregister(): Unregister a crypto device.
- * @zdev: Pointer to crypto device
- *
- * Unregister a crypto device.
- */
-void zcrypt_device_unregister(struct zcrypt_device *zdev)
-{
- if (zdev->ops->rng)
- zcrypt_rng_device_remove();
- spin_lock_bh(&zcrypt_device_lock);
- zcrypt_device_count--;
- list_del_init(&zdev->list);
- spin_unlock_bh(&zcrypt_device_lock);
- sysfs_remove_group(&zdev->ap_dev->device.kobj,
- &zcrypt_device_attr_group);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_unregister);
-
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
- spin_lock_bh(&zcrypt_ops_list_lock);
list_add_tail(&zops->list, &zcrypt_ops_list);
- spin_unlock_bh(&zcrypt_ops_list_lock);
}
-EXPORT_SYMBOL(zcrypt_msgtype_register);
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
- spin_lock_bh(&zcrypt_ops_list_lock);
list_del_init(&zops->list);
- spin_unlock_bh(&zcrypt_ops_list_lock);
}
-EXPORT_SYMBOL(zcrypt_msgtype_unregister);
-static inline
-struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
struct zcrypt_ops *zops;
- int found = 0;
- spin_lock_bh(&zcrypt_ops_list_lock);
- list_for_each_entry(zops, &zcrypt_ops_list, list) {
+ list_for_each_entry(zops, &zcrypt_ops_list, list)
if ((zops->variant == variant) &&
- (!strncmp(zops->name, name, sizeof(zops->name)))) {
- found = 1;
- break;
- }
- }
- if (!found || !try_module_get(zops->owner))
- zops = NULL;
-
- spin_unlock_bh(&zcrypt_ops_list_lock);
-
- return zops;
-}
-
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
-{
- struct zcrypt_ops *zops = NULL;
-
- zops = __ops_lookup(name, variant);
- if (!zops) {
- request_module("%s", name);
- zops = __ops_lookup(name, variant);
- }
- return zops;
-}
-EXPORT_SYMBOL(zcrypt_msgtype_request);
-
-void zcrypt_msgtype_release(struct zcrypt_ops *zops)
-{
- if (zops)
- module_put(zops->owner);
+ (!strncmp(zops->name, name, sizeof(zops->name))))
+ return zops;
+ return NULL;
}
-EXPORT_SYMBOL(zcrypt_msgtype_release);
+EXPORT_SYMBOL(zcrypt_msgtype);
/**
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
return 0;
}
+static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ unsigned int weight)
+{
+ if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+ return NULL;
+ zcrypt_queue_get(zq);
+ get_device(&zq->queue->ap_dev.device);
+ atomic_add(weight, &zc->load);
+ atomic_add(weight, &zq->load);
+ zq->request_count++;
+ return zq;
+}
+
+static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ unsigned int weight)
+{
+ struct module *mod = zq->queue->ap_dev.drv->driver.owner;
+
+ zq->request_count--;
+ atomic_sub(weight, &zc->load);
+ atomic_sub(weight, &zq->load);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
+ module_put(mod);
+}
+
+static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+ struct zcrypt_card *pref_zc,
+ unsigned weight, unsigned pref_weight)
+{
+ if (!pref_zc)
+ return 0;
+ weight += atomic_read(&zc->load);
+ pref_weight += atomic_read(&pref_zc->load);
+ if (weight == pref_weight)
+ return atomic_read(&zc->card->total_request_count) >
+ atomic_read(&pref_zc->card->total_request_count);
+ return weight > pref_weight;
+}
+
+static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+ struct zcrypt_queue *pref_zq,
+ unsigned weight, unsigned pref_weight)
+{
+ if (!pref_zq)
+ return 0;
+ weight += atomic_read(&zq->load);
+ pref_weight += atomic_read(&pref_zq->load);
+ if (weight == pref_weight)
+ return zq->queue->total_request_count >
+ pref_zq->queue->total_request_count;
+ return weight > pref_weight;
+}
+
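A worked example of the comparison rule above, with made-up numbers: the effective weight of a card is its per-function speed_rating plus its current load, and the candidate is skipped whenever that sum exceeds the preferred card's.

#include <stdio.h>

int main(void)
{
	unsigned int pref_weight = 100 + 40;	/* preferred: rating 100, load 40 */
	unsigned int cand_weight = 80 + 70;	/* candidate: rating 80, load 70 */

	if (cand_weight > pref_weight)
		printf("candidate skipped (150 > 140), keep preferred card\n");
	else
		printf("candidate becomes the new preference\n");
	return 0;
}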
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+ if (mex->outputdatalength < mex->inputdatalength) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (mex->outputdatalength < mex->inputdatalength)
- return -EINVAL;
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
*/
mex->outputdatalength = mex->inputdatalength;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online ||
- !zdev->ops->rsa_modexpo ||
- zdev->min_mod_size > mex->inputdatalength ||
- zdev->max_mod_size < mex->inputdatalength)
+ rc = get_rsa_modex_fc(mex, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online accelerator and CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > mex->inputdatalength ||
+ zc->max_mod_size < mex->inputdatalength)
+ continue;
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rsa_modexpo(zdev, mex);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo)
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(mex, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
- struct zcrypt_device *zdev;
- unsigned long long z1, z2, z3;
- int rc, copied;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+ if (crt->outputdatalength < crt->inputdatalength) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (crt->outputdatalength < crt->inputdatalength)
- return -EINVAL;
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
*/
crt->outputdatalength = crt->inputdatalength;
- copied = 0;
- restart:
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online ||
- !zdev->ops->rsa_modexpo_crt ||
- zdev->min_mod_size > crt->inputdatalength ||
- zdev->max_mod_size < crt->inputdatalength)
+ rc = get_rsa_crt_fc(crt, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online accelerator and CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > crt->inputdatalength ||
+ zc->max_mod_size < crt->inputdatalength)
continue;
- if (zdev->short_crt && crt->inputdatalength > 240) {
- /*
- * Check inputdata for leading zeros for cards
- * that can't handle np_prime, bp_key, or
- * u_mult_inv > 128 bytes.
- */
- if (copied == 0) {
- unsigned int len;
- spin_unlock_bh(&zcrypt_device_lock);
- /* len is max 256 / 2 - 120 = 8
- * For bigger device just assume len of leading
- * 0s is 8 as stated in the requirements for
- * ica_rsa_modexpo_crt struct in zcrypt.h.
- */
- if (crt->inputdatalength <= 256)
- len = crt->inputdatalength / 2 - 120;
- else
- len = 8;
- if (len > sizeof(z1))
- return -EFAULT;
- z1 = z2 = z3 = 0;
- if (copy_from_user(&z1, crt->np_prime, len) ||
- copy_from_user(&z2, crt->bp_key, len) ||
- copy_from_user(&z3, crt->u_mult_inv, len))
- return -EFAULT;
- z1 = z2 = z3 = 0;
- copied = 1;
- /*
- * We have to restart device lookup -
- * the device list may have changed by now.
- */
- goto restart;
- }
- if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
- /* The device can't handle this request. */
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo_crt)
continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- }
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(crt, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ap_message ap_msg;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ unsigned short *domain;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online || !zdev->ops->send_cprb ||
- (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
- (xcRB->user_defined != AUTOSELECT &&
- AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
+ rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x10000000))
+ continue;
+ /* Check for user selected CCA card */
+ if (xcRB->user_defined != AUTOSELECT &&
+ xcRB->user_defined != zc->card->id)
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->send_cprb(zdev, xcRB);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
+ /* get weight index of the card device */
+ weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online ||
+ !zq->ops->send_cprb ||
+ ((*domain != (unsigned short) AUTOSELECT) &&
+ (*domain != AP_QID_QUEUE(zq->queue->qid))))
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
}
- else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
-}
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
-struct ep11_target_dev_list {
- unsigned short targets_num;
- struct ep11_target_dev *targets;
-};
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* in case of auto select, provide the correct domain */
+ qid = pref_zq->queue->qid;
+ if (*domain == (unsigned short) AUTOSELECT)
+ *domain = AP_QID_QUEUE(qid);
-static bool is_desired_ep11dev(unsigned int dev_qid,
- struct ep11_target_dev_list dev_list)
+ rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(xcRB, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static bool is_desired_ep11_card(unsigned int dev_id,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
{
- int n;
+ while (target_num-- > 0) {
+ if (dev_id == targets->ap_id)
+ return true;
+ targets++;
+ }
+ return false;
+}
- for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
- if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
- (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
+static bool is_desired_ep11_queue(unsigned int dev_qid,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
+{
+ while (target_num-- > 0) {
+ if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
return true;
- }
+ targets++;
}
return false;
}
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- struct zcrypt_device *zdev;
- bool autoselect = false;
- int rc;
- struct ep11_target_dev_list ep11_dev_list = {
- .targets_num = 0x00,
- .targets = NULL,
- };
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ep11_target_dev *targets;
+ unsigned short target_num;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
- ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
+ target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
- if (ep11_dev_list.targets_num == 0)
- autoselect = true;
- else {
- ep11_dev_list.targets = kcalloc((unsigned short)
- xcrb->targets_num,
- sizeof(struct ep11_target_dev),
- GFP_KERNEL);
- if (!ep11_dev_list.targets)
- return -ENOMEM;
+ targets = NULL;
+ if (target_num != 0) {
+ struct ep11_target_dev __user *uptr;
- if (copy_from_user(ep11_dev_list.targets,
- (struct ep11_target_dev __force __user *)
- xcrb->targets, xcrb->targets_num *
- sizeof(struct ep11_target_dev)))
- return -EFAULT;
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+ if (copy_from_user(targets, uptr,
+ target_num * sizeof(*targets))) {
+ rc = -EFAULT;
+ goto out;
+ }
}
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- /* check if device is eligible */
- if (!zdev->online ||
- zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
- continue;
+ rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+ if (rc)
+ goto out_free;
- /* check if device is selected as valid target */
- if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
- !autoselect)
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online EP11 cards */
+ if (!zc->online || !(zc->card->functions & 0x04000000))
+ continue;
+ /* Check for user selected EP11 card */
+ if (targets &&
+ !is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
+ /* get weight index of the card device */
+ weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online ||
+ !zq->ops->send_ep11_cprb ||
+ (targets &&
+ !is_desired_ep11_queue(zq->queue->qid,
+ target_num, targets)))
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- } else {
- rc = -EAGAIN;
- }
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out_free;
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out_free:
+ kfree(targets);
+out:
+ trace_s390_zcrypt_rep(xcrb, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
}
static long zcrypt_rng(char *buffer)
{
- struct zcrypt_device *zdev;
- int rc;
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int weight, pref_weight;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ unsigned int domain;
+ int qid = 0, rc = -ENODEV;
+
+ trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- if (!zdev->online || !zdev->ops->rng)
+ rc = get_rng_fc(&ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for online CCA cards */
+ if (!zc->online || !(zc->card->functions & 0x10000000))
continue;
- zcrypt_device_get(zdev);
- get_device(&zdev->ap_dev->device);
- zdev->request_count++;
- __zcrypt_decrease_preference(zdev);
- if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
- spin_unlock_bh(&zcrypt_device_lock);
- rc = zdev->ops->rng(zdev, buffer);
- spin_lock_bh(&zcrypt_device_lock);
- module_put(zdev->ap_dev->drv->driver.owner);
- } else
- rc = -EAGAIN;
- zdev->request_count--;
- __zcrypt_increase_preference(zdev);
- put_device(&zdev->ap_dev->device);
- zcrypt_device_put(zdev);
- spin_unlock_bh(&zcrypt_device_lock);
- return rc;
+ /* get weight index of the card device */
+ weight = zc->speed_rating[func_code];
+ if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is online and eligible */
+ if (!zq->online || !zq->ops->rng)
+ continue;
+ if (zcrypt_queue_compare(zq, pref_zq,
+ weight, pref_weight))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_weight = weight;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq)
+ return -ENODEV;
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ trace_s390_zcrypt_rep(buffer, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ struct zcrypt_device_status *stat;
+
+ memset(matrix, 0, sizeof(*matrix));
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ stat = matrix->device;
+ stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
+ stat += AP_QID_QUEUE(zq->queue->qid);
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
- return -ENODEV;
+ spin_unlock(&zcrypt_list_lock);
}
+EXPORT_SYMBOL(zcrypt_device_status_mask);
static void zcrypt_status_mask(char status[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(status, 0, sizeof(char) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->online ? zdev->user_space_type : 0x0d;
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ status[AP_QID_CARD(zq->queue->qid)] =
+ zc->online ? zc->user_space_type : 0x0d;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(qdepth, 0, sizeof(char) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->ap_dev->pendingq_count +
- zdev->ap_dev->requestq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ qdepth[AP_QID_CARD(zq->queue->qid)] =
+ zq->queue->pendingq_count +
+ zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
- zdev->ap_dev->total_request_count;
- spin_unlock(&zdev->ap_dev->lock);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ reqcnt[AP_QID_CARD(zq->queue->qid)] =
+ zq->queue->total_request_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
- struct zcrypt_device *zdev;
- int pendingq_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- pendingq_count += zdev->ap_dev->pendingq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int pendingq_count;
+
+ pendingq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ pendingq_count += zq->queue->pendingq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
return pendingq_count;
}
static int zcrypt_requestq_count(void)
{
- struct zcrypt_device *zdev;
- int requestq_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list) {
- spin_lock(&zdev->ap_dev->lock);
- requestq_count += zdev->ap_dev->requestq_count;
- spin_unlock(&zdev->ap_dev->lock);
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int requestq_count;
+
+ requestq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ requestq_count += zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
}
- spin_unlock_bh(&zcrypt_device_lock);
+ spin_unlock(&zcrypt_list_lock);
return requestq_count;
}
static int zcrypt_count_type(int type)
{
- struct zcrypt_device *zdev;
- int device_count = 0;
-
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (zdev->user_space_type == type)
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int device_count;
+
+ device_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ if (zc->card->id != type)
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
device_count++;
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
return device_count;
}
@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
+ case ZDEVICESTATUS: {
+ struct zcrypt_device_matrix *device_status;
+
+ device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+
+ zcrypt_device_status_mask(device_status);
+
+ if (copy_to_user((char __user *) arg, device_status,
+ sizeof(struct zcrypt_device_matrix))) {
+ kfree(device_status);
+ return -EFAULT;
+ }
+
+ kfree(device_status);
+ return 0;
+ }
case Z90STAT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status);
@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file)
static void zcrypt_disable_card(int index)
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
- zdev->online = 0;
- ap_flush_queue(zdev->ap_dev);
- break;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ zq->online = 0;
+ ap_flush_queue(zq->queue);
}
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_enable_card(int index)
{
- struct zcrypt_device *zdev;
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
- spin_lock_bh(&zcrypt_device_lock);
- list_for_each_entry(zdev, &zcrypt_device_list, list)
- if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
- zdev->online = 1;
- break;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ zq->online = 1;
+ ap_flush_queue(zq->queue);
}
- spin_unlock_bh(&zcrypt_device_lock);
+ }
+ spin_unlock(&zcrypt_list_lock);
}
static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = {
.quality = 990,
};
-static int zcrypt_rng_device_add(void)
+int zcrypt_rng_device_add(void)
{
int rc = 0;
@@ -1399,7 +1406,7 @@ out:
return rc;
}
-static void zcrypt_rng_device_remove(void)
+void zcrypt_rng_device_remove(void)
{
mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--;
@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void)
int __init zcrypt_debug_init(void)
{
- debugfs_root = debugfs_create_dir("zcrypt", NULL);
-
- zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
- debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
- debug_set_level(zcrypt_dbf_common, DBF_ERR);
-
- zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
- debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
- debug_set_level(zcrypt_dbf_devices, DBF_ERR);
+ zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
+ zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
+ debug_set_level(zcrypt_dbf_info, DBF_ERR);
return 0;
}
void zcrypt_debug_exit(void)
{
- debugfs_remove(debugfs_root);
- debug_unregister(zcrypt_dbf_common);
- debug_unregister(zcrypt_dbf_devices);
+ debugfs_remove(zcrypt_dbf_root);
+ debug_unregister(zcrypt_dbf_info);
}
/**
@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void)
goto out;
/* Set up the proc file system */
- zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
+ zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
+ &zcrypt_proc_fops);
if (!zcrypt_entry) {
rc = -ENOMEM;
goto out_misc;
}
+ zcrypt_msgtype6_init();
+ zcrypt_msgtype50_init();
return 0;
out_misc:
@@ -1472,10 +1477,12 @@ out:
*
* The module termination code.
*/
-void zcrypt_api_exit(void)
+void __exit zcrypt_api_exit(void)
{
remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
+ zcrypt_msgtype6_exit();
+ zcrypt_msgtype50_exit();
zcrypt_debug_exit();
}
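The hunks above replace the old flat device-list walk with a two-level search: pick the best card, then the best queue on that card, with the per-request weight taken from the card's speed rating and adjusted by current load. A minimal userspace sketch of that selection idea follows; the compare rule ("lower weight plus load wins") is an assumption here, since the real zcrypt_card_compare()/zcrypt_queue_compare() helpers are not part of this hunk, and the struct layouts are illustrative only.

#include <stdio.h>

struct queue { int online; int load; };
struct card  { int online; int weight; int load; struct queue queues[2]; };

int main(void)
{
	struct card cards[2] = {
		{ 1, 10, 3, { { 1, 1 }, { 1, 0 } } },
		{ 1,  8, 0, { { 0, 0 }, { 1, 2 } } },
	};
	struct card *pref_zc = NULL;
	struct queue *pref_zq = NULL;
	int i, j;

	for (i = 0; i < 2; i++) {
		struct card *zc = &cards[i];

		if (!zc->online)
			continue;
		/* prefer the card with the smaller weight + load sum */
		if (pref_zc &&
		    zc->weight + zc->load >= pref_zc->weight + pref_zc->load)
			continue;
		for (j = 0; j < 2; j++) {
			struct queue *zq = &zc->queues[j];

			if (!zq->online)
				continue;
			/* among eligible queues keep the least loaded one */
			if (pref_zq && zq->load >= pref_zq->load)
				continue;
			pref_zc = zc;
			pref_zq = zq;
		}
	}

	if (pref_zq)
		printf("picked card weight %d, queue load %d\n",
		       pref_zc->weight, pref_zq->load);
	else
		printf("no device available\n");
	return 0;
}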
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 38618f05ad92..274a59051534 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -84,57 +84,110 @@ struct ica_z90_status {
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
-struct zcrypt_device;
+/*
+ * Identifier for Crypto Request Performance Index
+ */
+enum crypto_ops {
+ MEX_1K,
+ MEX_2K,
+ MEX_4K,
+ CRT_1K,
+ CRT_2K,
+ CRT_4K,
+ HWRNG,
+ SECKEY,
+ NUM_OPS
+};
+
+struct zcrypt_queue;
struct zcrypt_ops {
- long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
- long (*rsa_modexpo_crt)(struct zcrypt_device *,
+ long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+ long (*rsa_modexpo_crt)(struct zcrypt_queue *,
struct ica_rsa_modexpo_crt *);
- long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
- long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
- long (*rng)(struct zcrypt_device *, char *);
+ long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+ struct ap_message *);
+ long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+ struct ap_message *);
+ long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */
struct module *owner;
int variant;
char name[128];
};
-struct zcrypt_device {
+struct zcrypt_card {
struct list_head list; /* Device list. */
- spinlock_t lock; /* Per device lock. */
+ struct list_head zqueues; /* List of zcrypt queues */
struct kref refcount; /* device refcounting */
- struct ap_device *ap_dev; /* The "real" ap device. */
- struct zcrypt_ops *ops; /* Crypto operations. */
+ struct ap_card *card; /* The "real" ap card device. */
int online; /* User online/offline */
int user_space_type; /* User space device id. */
char *type_string; /* User space device name. */
int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */
- int short_crt; /* Card has crt length restriction. */
- int speed_rating; /* Speed of the crypto device. */
+ int max_exp_bit_length;
+ int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
+ atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */
+};
- struct ap_message reply; /* Per-device reply structure. */
- int max_exp_bit_length;
+struct zcrypt_queue {
+ struct list_head list; /* Device list. */
+ struct kref refcount; /* device refcounting */
+ struct zcrypt_card *zcard;
+ struct zcrypt_ops *ops; /* Crypto operations. */
+ struct ap_queue *queue; /* The "real" ap queue device. */
+ int online; /* User online/offline */
+
+ atomic_t load; /* Utilization of the crypto device */
- debug_info_t *dbf_area; /* debugging */
+ int request_count; /* # current requests. */
+
+ struct ap_message reply; /* Per-device reply structure. */
};
/* transport layer rescanning */
extern atomic_t zcrypt_rescan_req;
-struct zcrypt_device *zcrypt_device_alloc(size_t);
-void zcrypt_device_free(struct zcrypt_device *);
-void zcrypt_device_get(struct zcrypt_device *);
-int zcrypt_device_put(struct zcrypt_device *);
-int zcrypt_device_register(struct zcrypt_device *);
-void zcrypt_device_unregister(struct zcrypt_device *);
+extern spinlock_t zcrypt_list_lock;
+extern int zcrypt_device_count;
+extern struct list_head zcrypt_card_list;
+
+#define for_each_zcrypt_card(_zc) \
+ list_for_each_entry(_zc, &zcrypt_card_list, list)
+
+#define for_each_zcrypt_queue(_zq, _zc) \
+ list_for_each_entry(_zq, &(_zc)->zqueues, list)
+
+struct zcrypt_card *zcrypt_card_alloc(void);
+void zcrypt_card_free(struct zcrypt_card *);
+void zcrypt_card_get(struct zcrypt_card *);
+int zcrypt_card_put(struct zcrypt_card *);
+int zcrypt_card_register(struct zcrypt_card *);
+void zcrypt_card_unregister(struct zcrypt_card *);
+struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
+ unsigned int, unsigned int);
+void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+void zcrypt_queue_free(struct zcrypt_queue *);
+void zcrypt_queue_get(struct zcrypt_queue *);
+int zcrypt_queue_put(struct zcrypt_queue *);
+int zcrypt_queue_register(struct zcrypt_queue *);
+void zcrypt_queue_unregister(struct zcrypt_queue *);
+void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
+void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+
+int zcrypt_rng_device_add(void);
+void zcrypt_rng_device_remove(void);
+
void zcrypt_msgtype_register(struct zcrypt_ops *);
void zcrypt_msgtype_unregister(struct zcrypt_ops *);
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
-void zcrypt_msgtype_release(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
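The new speed_rating[NUM_OPS] array declared above is indexed by the enum crypto_ops function code, which is in turn derived from the request size (see get_rsa_modex_fc() at the end of this patch). A small standalone sketch of that indexing; the table values are the CEX4A sample ratings from zcrypt_cex4.c below, and the modexpo_fc() helper name is illustrative.

#include <stdio.h>

enum crypto_ops {
	MEX_1K, MEX_2K, MEX_4K, CRT_1K, CRT_2K, CRT_4K, HWRNG, SECKEY, NUM_OPS
};

/* map an RSA mod-expo input length to a function code, as get_rsa_modex_fc() does */
static int modexpo_fc(unsigned int inputdatalength)
{
	if (inputdatalength <= 128)	/* up to 1024-bit modulus */
		return MEX_1K;
	if (inputdatalength <= 256)	/* up to 2048-bit modulus */
		return MEX_2K;
	return MEX_4K;			/* up to 4096-bit modulus */
}

int main(void)
{
	/* illustrative per-card table, lower value = faster for that op */
	static const int speed_rating[NUM_OPS] = { 5, 6, 59, 20, 115, 581, 0, 0 };
	unsigned int len = 256;		/* a 2048-bit request */

	printf("weight = %d\n", speed_rating[modexpo_fc(len)]);
	return 0;
}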
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 000000000000..53436ea52230
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,187 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto card devices.
+ */
+
+static ssize_t zcrypt_card_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+}
+
+static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
+
+static ssize_t zcrypt_card_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+}
+
+static ssize_t zcrypt_card_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_queue *zq;
+ int online, id;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ zc->online = online;
+ id = zc->card->id;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+
+ spin_lock(&zcrypt_list_lock);
+ list_for_each_entry(zq, &zc->zqueues, list)
+ zcrypt_queue_force_online(zq, online);
+ spin_unlock(&zcrypt_list_lock);
+ return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
+ zcrypt_card_online_store);
+
+static struct attribute *zcrypt_card_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_online.attr,
+ NULL,
+};
+
+static struct attribute_group zcrypt_card_attr_group = {
+ .attrs = zcrypt_card_attrs,
+};
+
+struct zcrypt_card *zcrypt_card_alloc(void)
+{
+ struct zcrypt_card *zc;
+
+ zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+ if (!zc)
+ return NULL;
+ INIT_LIST_HEAD(&zc->list);
+ INIT_LIST_HEAD(&zc->zqueues);
+ kref_init(&zc->refcount);
+ return zc;
+}
+EXPORT_SYMBOL(zcrypt_card_alloc);
+
+void zcrypt_card_free(struct zcrypt_card *zc)
+{
+ kfree(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_free);
+
+static void zcrypt_card_release(struct kref *kref)
+{
+ struct zcrypt_card *zdev =
+ container_of(kref, struct zcrypt_card, refcount);
+ zcrypt_card_free(zdev);
+}
+
+void zcrypt_card_get(struct zcrypt_card *zc)
+{
+ kref_get(&zc->refcount);
+}
+EXPORT_SYMBOL(zcrypt_card_get);
+
+int zcrypt_card_put(struct zcrypt_card *zc)
+{
+ return kref_put(&zc->refcount, zcrypt_card_release);
+}
+EXPORT_SYMBOL(zcrypt_card_put);
+
+/**
+ * zcrypt_card_register() - Register a crypto card device.
+ * @zc: Pointer to a crypto card device
+ *
+ * Register a crypto card device. Returns 0 if successful.
+ */
+int zcrypt_card_register(struct zcrypt_card *zc)
+{
+ int rc;
+
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc)
+ return rc;
+
+ spin_lock(&zcrypt_list_lock);
+ list_add_tail(&zc->list, &zcrypt_card_list);
+ spin_unlock(&zcrypt_list_lock);
+
+ zc->online = 1;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_card_register);
+
+/**
+ * zcrypt_card_unregister(): Unregister a crypto card device.
+ * @zc: Pointer to crypto card device
+ *
+ * Unregister a crypto card device.
+ */
+void zcrypt_card_unregister(struct zcrypt_card *zc)
+{
+ ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+}
+EXPORT_SYMBOL(zcrypt_card_unregister);
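zcrypt_card_get() and zcrypt_card_put() above are thin wrappers around kref: the card is freed from the kref release callback once the last reference is dropped. A minimal userspace sketch of the same get/put lifetime pattern, without the kernel's atomic kref type; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct card {
	int refcount;
	int id;
};

static struct card *card_alloc(int id)
{
	struct card *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->refcount = 1;	/* the caller owns the initial reference */
	c->id = id;
	return c;
}

static void card_get(struct card *c)
{
	c->refcount++;
}

static int card_put(struct card *c)
{
	if (--c->refcount > 0)
		return 0;
	printf("releasing card %02x\n", c->id);	/* release callback */
	free(c);
	return 1;
}

int main(void)
{
	struct card *c = card_alloc(0x0a);

	if (!c)
		return 1;
	card_get(c);	/* a second user takes a reference */
	card_put(c);	/* first put: object stays alive */
	card_put(c);	/* last put: release runs and frees the card */
	return 0;
}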
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 15104aaa075a..b97c5d5ee5a4 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -30,7 +30,8 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -43,9 +44,6 @@
#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX2A_SPEED_RATING 970
-#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
-
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
@@ -57,107 +55,195 @@
#define CEX2A_CLEANUP_TIME (15*HZ)
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
-static struct ap_device_id zcrypt_cex2a_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
+static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
-static struct ap_driver zcrypt_cex2a_driver = {
- .probe = zcrypt_cex2a_probe,
- .remove = zcrypt_cex2a_remove,
- .ids = zcrypt_cex2a_ids,
- .request_timeout = CEX2A_CLEANUP_TIME,
+static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
+
/**
- * Probe function for CEX2A cards. It always accepts the AP device
- * since the bus_match already checked the hardware type.
+ * Probe function for CEX2A card devices. It always accepts the AP device
+ * since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
*/
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
+static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = NULL;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2A_SPEED_IDX[] = {
+ 800, 1000, 2000, 900, 1200, 2400, 0, 0};
+ static const int CEX3A_SPEED_IDX[] = {
+ 400, 500, 1000, 450, 550, 1200, 0, 0};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
+ sizeof(CEX2A_SPEED_IDX));
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ zc->type_string = "CEX2A";
+ zc->user_space_type = ZCRYPT_CEX2A;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+ }
+ memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
+ sizeof(CEX3A_SPEED_IDX));
+ zc->type_string = "CEX3A";
+ zc->user_space_type = ZCRYPT_CEX3A;
+ } else {
+ zcrypt_card_free(zc);
+ return -ENODEV;
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2A card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex2a_card_driver = {
+ .probe = zcrypt_cex2a_card_probe,
+ .remove = zcrypt_cex2a_card_remove,
+ .ids = zcrypt_cex2a_card_ids,
+};
+
+/**
+ * Probe function for CEX2A queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = NULL;
+ int rc;
+
switch (ap_dev->device_type) {
case AP_DEVICE_TYPE_CEX2A:
- zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zdev)
+ zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zq)
return -ENOMEM;
- zdev->user_space_type = ZCRYPT_CEX2A;
- zdev->type_string = "CEX2A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->short_crt = 1;
- zdev->speed_rating = CEX2A_SPEED_RATING;
- zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3A:
- zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
- if (!zdev)
+ zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
+ if (!zq)
return -ENOMEM;
- zdev->user_space_type = ZCRYPT_CEX3A;
- zdev->type_string = "CEX3A";
- zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
- zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
- }
- zdev->short_crt = 1;
- zdev->speed_rating = CEX3A_SPEED_RATING;
break;
}
- if (!zdev)
+ if (!zq)
return -ENODEV;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
- MSGTYPE50_VARIANT_DEFAULT);
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX2A_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
if (rc) {
- ap_dev->private = NULL;
- zcrypt_msgtype_release(zdev->ops);
- zcrypt_device_free(zdev);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
}
+
return rc;
}
/**
- * This is called to remove the extended CEX2A driver information
- * if an AP device is removed.
+ * This is called to remove the CEX2A queue driver information
+ * if an AP queue device is removed.
*/
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops = zdev->ops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+ .probe = zcrypt_cex2a_queue_probe,
+ .remove = zcrypt_cex2a_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_cex2a_queue_ids,
+};
+
int __init zcrypt_cex2a_init(void)
{
- return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+ THIS_MODULE, "cex2acard");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+ THIS_MODULE, "cex2aqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+ return rc;
}
void __exit zcrypt_cex2a_exit(void)
{
- ap_driver_unregister(&zcrypt_cex2a_driver);
+ ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
}
module_init(zcrypt_cex2a_init);
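Splitting the driver into a card part and a queue part means module init now registers two ap_driver structures and must roll the first one back if the second registration fails, exactly as zcrypt_cex2a_init() above does. A minimal sketch of that register-then-rollback pattern; register_a()/register_b() are stand-ins, with register_b() failing on purpose to exercise the rollback path.

#include <stdio.h>

static int register_a(void)
{
	puts("card driver registered");
	return 0;
}

static void unregister_a(void)
{
	puts("card driver unregistered");
}

static int register_b(void)
{
	puts("queue driver registration fails");
	return -1;	/* simulated failure */
}

static int init_both(void)
{
	int rc;

	rc = register_a();
	if (rc)
		return rc;

	rc = register_b();
	if (rc)
		unregister_a();	/* roll back the first step on failure */

	return rc;
}

int main(void)
{
	return init_both() ? 1 : 0;
}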
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ccb2e78ebf0e..4e91163d70a6 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -9,6 +9,7 @@
#include <linux/err.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -24,13 +25,6 @@
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
-#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
-#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
-#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
-#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
-#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
-
#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
@@ -41,147 +35,246 @@
*/
#define CEX4_CLEANUP_TIME (900*HZ)
-static struct ap_device_id zcrypt_cex4_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
"Copyright IBM Corp. 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_cex4_probe(struct ap_device *ap_dev);
-static void zcrypt_cex4_remove(struct ap_device *ap_dev);
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
-static struct ap_driver zcrypt_cex4_driver = {
- .probe = zcrypt_cex4_probe,
- .remove = zcrypt_cex4_remove,
- .ids = zcrypt_cex4_ids,
- .request_timeout = CEX4_CLEANUP_TIME,
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
};
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+
/**
- * Probe function for CEX4 cards. It always accepts the AP device
+ * Probe function for CEX4 card device. It always accepts the AP device
* since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP device.
*/
-static int zcrypt_cex4_probe(struct ap_device *ap_dev)
+static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = NULL;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX4A_SPEED_IDX[] = {
+ 5, 6, 59, 20, 115, 581, 0, 0};
+ static const int CEX5A_SPEED_IDX[] = {
+ 3, 3, 6, 8, 32, 218, 0, 0};
+ static const int CEX4C_SPEED_IDX[] = {
+ 24, 25, 82, 41, 138, 1111, 79, 8};
+ static const int CEX5C_SPEED_IDX[] = {
+ 10, 14, 23, 17, 45, 242, 63, 4};
+ static const int CEX4P_SPEED_IDX[] = {
+ 142, 198, 1852, 203, 331, 1563, 0, 8};
+ static const int CEX5P_SPEED_IDX[] = {
+ 49, 67, 131, 52, 85, 287, 0, 4};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_CEX4:
- case AP_DEVICE_TYPE_CEX5:
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
- zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4A";
- zdev->speed_rating = CEX4A_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5A";
- zdev->speed_rating = CEX5A_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX3A;
- zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
- if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
- zdev->max_mod_size =
- CEX4A_MAX_MOD_SIZE_4K;
- zdev->max_exp_bit_length =
- CEX4A_MAX_MOD_SIZE_4K;
- } else {
- zdev->max_mod_size =
- CEX4A_MAX_MOD_SIZE_2K;
- zdev->max_exp_bit_length =
- CEX4A_MAX_MOD_SIZE_2K;
- }
- zdev->short_crt = 1;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
- MSGTYPE50_VARIANT_DEFAULT);
- } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
- zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4C";
- zdev->speed_rating = CEX4C_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5C";
- zdev->speed_rating = CEX5C_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX3C;
- zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- zdev->short_crt = 0;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
- } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
- zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
- if (!zdev)
- return -ENOMEM;
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
- zdev->type_string = "CEX4P";
- zdev->speed_rating = CEX4P_SPEED_RATING;
- } else {
- zdev->type_string = "CEX5P";
- zdev->speed_rating = CEX5P_SPEED_RATING;
- }
- zdev->user_space_type = ZCRYPT_CEX4;
- zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- zdev->short_crt = 0;
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_EP11);
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+ if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4A";
+ zc->user_space_type = ZCRYPT_CEX4;
+ memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
+ sizeof(CEX4A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5A";
+ zc->user_space_type = ZCRYPT_CEX5;
+ memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
+ sizeof(CEX5A_SPEED_IDX));
}
- break;
- }
- if (!zdev)
+ zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_4K;
+ } else {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_2K;
+ }
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4C";
+ /* wrong user space type, must be CEX4
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
+ sizeof(CEX4C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5C";
+ /* wrong user space type, must be CEX5
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
+ sizeof(CEX5C_SPEED_IDX));
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4P";
+ zc->user_space_type = ZCRYPT_CEX4;
+ memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
+ sizeof(CEX4P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX5P";
+ zc->user_space_type = ZCRYPT_CEX5;
+ memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
+ sizeof(CEX5P_SPEED_IDX));
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else {
+ zcrypt_card_free(zc);
return -ENODEV;
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
if (rc) {
- zcrypt_msgtype_release(zdev->ops);
- ap_dev->private = NULL;
- zcrypt_device_free(zdev);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
}
+
return rc;
}
/**
- * This is called to remove the extended CEX4 driver information
- * if an AP device is removed.
+ * This is called to remove the CEX4 card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex4_card_driver = {
+ .probe = zcrypt_cex4_card_probe,
+ .remove = zcrypt_cex4_card_remove,
+ .ids = zcrypt_cex4_card_ids,
+};
+
+/**
+ * Probe function for CEX4 queue device. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
*/
-static void zcrypt_cex4_remove(struct ap_device *ap_dev)
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
- if (zdev) {
- zops = zdev->ops;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+ zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+ MSGTYPE50_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_EP11);
+ } else {
+ return -ENODEV;
}
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX4_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX4 queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
+
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_cex4_queue_driver = {
+ .probe = zcrypt_cex4_queue_probe,
+ .remove = zcrypt_cex4_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_cex4_queue_ids,
+};
+
int __init zcrypt_cex4_init(void)
{
- return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_card_driver,
+ THIS_MODULE, "cex4card");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_queue_driver,
+ THIS_MODULE, "cex4queue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
+
+ return rc;
}
void __exit zcrypt_cex4_exit(void)
{
- ap_driver_unregister(&zcrypt_cex4_driver);
+ ap_driver_unregister(&zcrypt_cex4_queue_driver);
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
}
module_init(zcrypt_cex4_init);
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 28d9349de1ad..13e38defb6b8 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -1,51 +1,27 @@
/*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2016
* Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ * Harald Freudenberger <freude@de.ibm.com>
*/
#ifndef ZCRYPT_DEBUG_H
#define ZCRYPT_DEBUG_H
#include <asm/debug.h>
-#include "zcrypt_api.h"
-/* that gives us 15 characters in the text event views */
-#define ZCRYPT_DBF_LEN 16
-
-#define DBF_ERR 3 /* error conditions */
-#define DBF_WARN 4 /* warning conditions */
-#define DBF_INFO 6 /* informational */
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define ZCRYPT_DBF_COMMON(level, text...) \
- do { \
- if (debug_level_enabled(zcrypt_dbf_common, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(zcrypt_dbf_common, level, \
- debug_buffer); \
- } \
- } while (0)
-
-#define ZCRYPT_DBF_DEVICES(level, text...) \
- do { \
- if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(zcrypt_dbf_devices, level, \
- debug_buffer); \
- } \
- } while (0)
-
-#define ZCRYPT_DBF_DEV(level, device, text...) \
- do { \
- if (debug_level_enabled(device->dbf_area, level)) { \
- char debug_buffer[ZCRYPT_DBF_LEN]; \
- snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
- debug_text_event(device->dbf_area, level, \
- debug_buffer); \
- } \
- } while (0)
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define ZCRYPT_DBF(...) \
+ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *zcrypt_dbf_info;
int zcrypt_debug_init(void);
void zcrypt_debug_exit(void);
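The debug interface now funnels everything through a single sprintf-style macro instead of three text-event areas. A minimal userspace sketch of a level-gated printf-style macro in the same spirit; the ZDBG() name and the dbg_level variable are illustrative, not the kernel debug_sprintf_event() API.

#include <stdio.h>

enum { DBF_ERR = 3, DBF_WARN = 4, DBF_INFO = 5, DBF_DEBUG = 6 };

static int dbg_level = DBF_INFO;	/* messages above this level are dropped */

#define ZDBG(level, fmt, ...) \
	do { \
		if ((level) <= dbg_level) \
			fprintf(stderr, "zcrypt: " fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	ZDBG(DBF_INFO, "card=%02x online=%d\n", 0x0a, 1);
	ZDBG(DBF_DEBUG, "filtered out at the default level\n");
	return 0;
}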
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index de1b6c1d172c..13df60209ed3 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -55,52 +55,61 @@ struct error_hdr {
#define TYPE82_RSP_CODE 0x82
#define TYPE88_RSP_CODE 0x88
-#define REP82_ERROR_MACHINE_FAILURE 0x10
-#define REP82_ERROR_PREEMPT_FAILURE 0x12
-#define REP82_ERROR_CHECKPT_FAILURE 0x14
-#define REP82_ERROR_MESSAGE_TYPE 0x20
-#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
-#define REP82_ERROR_INVALID_MSG_LEN 0x23
-#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
-#define REP82_ERROR_FORMAT_FIELD 0x29
-#define REP82_ERROR_INVALID_COMMAND 0x30
-#define REP82_ERROR_MALFORMED_MSG 0x40
-#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
-#define REP82_ERROR_WORD_ALIGNMENT 0x60
-#define REP82_ERROR_MESSAGE_LENGTH 0x80
-#define REP82_ERROR_OPERAND_INVALID 0x82
-#define REP82_ERROR_OPERAND_SIZE 0x84
-#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
-#define REP82_ERROR_RESERVED_FIELD 0x88
-#define REP82_ERROR_TRANSPORT_FAIL 0x90
-#define REP82_ERROR_PACKET_TRUNCATED 0xA0
-#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+#define REP82_ERROR_MACHINE_FAILURE 0x10
+#define REP82_ERROR_PREEMPT_FAILURE 0x12
+#define REP82_ERROR_CHECKPT_FAILURE 0x14
+#define REP82_ERROR_MESSAGE_TYPE 0x20
+#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN 0x23
+#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD 0x29
+#define REP82_ERROR_INVALID_COMMAND 0x30
+#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+#define REP82_ERROR_WORD_ALIGNMENT 0x60
+#define REP82_ERROR_MESSAGE_LENGTH 0x80
+#define REP82_ERROR_OPERAND_INVALID 0x82
+#define REP82_ERROR_OPERAND_SIZE 0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD 0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_TRANSPORT_FAIL 0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
-#define REP88_ERROR_MODULE_FAILURE 0x10
+#define REP88_ERROR_MODULE_FAILURE 0x10
-#define REP88_ERROR_MESSAGE_TYPE 0x20
-#define REP88_ERROR_MESSAGE_MALFORMD 0x22
-#define REP88_ERROR_MESSAGE_LENGTH 0x23
-#define REP88_ERROR_RESERVED_FIELD 0x24
-#define REP88_ERROR_KEY_TYPE 0x34
-#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
-#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
-#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
+#define REP88_ERROR_MESSAGE_TYPE 0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH 0x23
+#define REP88_ERROR_RESERVED_FIELD 0x24
+#define REP88_ERROR_KEY_TYPE 0x34
+#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
+#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
-static inline int convert_error(struct zcrypt_device *zdev,
+static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
struct error_hdr *ehdr = reply->message;
+ int card = AP_QID_CARD(zq->queue->qid);
+ int queue = AP_QID_QUEUE(zq->queue->qid);
switch (ehdr->reply_code) {
case REP82_ERROR_OPERAND_INVALID:
case REP82_ERROR_OPERAND_SIZE:
case REP82_ERROR_EVEN_MOD_IN_OPND:
case REP88_ERROR_MESSAGE_MALFORMD:
+ case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+ case REP82_ERROR_INVALID_DOMAIN_PENDING:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
/* Invalid input data. */
+ ZCRYPT_DBF(DBF_WARN,
+ "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+ card, queue, ehdr->reply_code);
return -EINVAL;
case REP82_ERROR_MESSAGE_TYPE:
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev,
* and then repeat the request.
*/
atomic_set(&zcrypt_rescan_req, 1);
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN;
default:
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- ehdr->reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index eedfaa2cf715..6dd5d7c58dd0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
-
/**
* The type 50 message family is associated with a CEX2A card.
*
@@ -173,16 +170,48 @@ struct type80_hdr {
unsigned char reserved3[8];
} __packed;
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+{
+
+ if (!mex->inputdatalength)
+ return -EINVAL;
+
+ if (mex->inputdatalength <= 128) /* 1024 bit */
+ *fcode = MEX_1K;
+ else if (mex->inputdatalength <= 256) /* 2048 bit */
+ *fcode = MEX_2K;
+ else /* 4096 bit */
+ *fcode = MEX_4K;
+
+ return 0;
+}
+
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+{
+
+ if (!crt->inputdatalength)
+ return -EINVAL;
+
+ if (crt->inputdatalength <= 128) /* 1024 bit */
+ *fcode = CRT_1K;
+ else if (crt->inputdatalength <= 256) /* 2048 bit */
+ *fcode = CRT_2K;
+ else /* 4096 bit */
+ *fcode = CRT_4K;
+
+ return 0;
+}
+
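A brief, purely hypothetical caller sketch for the two helpers above (only the 128/256-byte thresholds and the MEX_*/CRT_* codes come from the patch; mex and the surrounding function are invented):

	int fcode, rc;

	rc = get_rsa_modex_fc(mex, &fcode);	/* mex: filled-in struct ica_rsa_modexpo * */
	if (rc)
		return rc;			/* inputdatalength == 0 => -EINVAL */
	/* fcode is now MEX_1K, MEX_2K or MEX_4K and can presumably be matched
	 * against the function codes a card supports */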
/**
* Convert an ICAMEX message to a type50 MEX message.
*
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
/**
* Convert an ICACRT message to a type50 CRT message.
*
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
- (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
+ (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
/**
* Copy results from a type 80 reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
*
* Returns 0 on success or -EFAULT.
*/
-static int convert_type80(struct zcrypt_device *zdev,
+static int convert_type80(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEX2A card may not do that. */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid),
- zdev->online, t80h->code);
-
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
else
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev,
return 0;
}
-static int convert_response(struct zcrypt_device *zdev,
+static int convert_response(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ unsigned char rtype = ((unsigned char *) reply->message)[1];
+
+ switch (rtype) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE80_RSP_CODE:
- return convert_type80(zdev, reply,
+ return convert_type80(zq, reply,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (unsigned int) rtype);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP device
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+static void zcrypt_cex2a_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
goto out; /* ap_msg->rc indicates the error */
t80h = reply->message;
if (t80h->type == TYPE80_RSP_CODE) {
- if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+ if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
length = min_t(int,
CEX2A_MAX_RESPONSE_SIZE, t80h->len);
else
@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
* The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
-static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
int rc;
ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
else
@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
- rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+ rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ rc = convert_response(zq, &ap_msg, mex->outputdata,
mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
return rc;
@@ -464,11 +501,11 @@ out_free:
/**
* The request distributor calls this function if it picked the CEX2A
* device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX2A device to the request distributor
* @crt: pointer to the modexpo_crt request buffer
*/
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
int rc;
ap_init_message(&ap_msg);
- if (zdev->user_space_type == ZCRYPT_CEX2A)
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
GFP_KERNEL);
else
@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &work;
- rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+ rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ rc = convert_response(zq, &ap_msg, crt->outputdata,
crt->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
kfree(ap_msg.message);
return rc;
@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
.variant = MSGTYPE50_VARIANT_DEFAULT,
};
-int __init zcrypt_msgtype50_init(void)
+void __init zcrypt_msgtype50_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
- return 0;
}
void __exit zcrypt_msgtype50_exit(void)
{
zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
}
-
-module_init(zcrypt_msgtype50_init);
-module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 0a66e4aeeb50..5cc280318ee7 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -35,7 +35,10 @@
#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
-int zcrypt_msgtype50_init(void);
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 21959719daef..e5563ffeb839 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
- struct ap_message *);
-
/**
* CPRB
* Note that all shorts, ints and longs are little-endian.
@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = {
.func_id = {0x54, 0x32},
};
+int speed_idx_cca(int req_type)
+{
+ switch (req_type) {
+ case 0x4142:
+ case 0x4149:
+ case 0x414D:
+ case 0x4341:
+ case 0x4344:
+ case 0x4354:
+ case 0x4358:
+ case 0x444B:
+ case 0x4558:
+ case 0x4643:
+ case 0x4651:
+ case 0x4C47:
+ case 0x4C4B:
+ case 0x4C51:
+ case 0x4F48:
+ case 0x504F:
+ case 0x5053:
+ case 0x5058:
+ case 0x5343:
+ case 0x5344:
+ case 0x5345:
+ case 0x5350:
+ return LOW;
+ case 0x414B:
+ case 0x4345:
+ case 0x4349:
+ case 0x434D:
+ case 0x4847:
+ case 0x4849:
+ case 0x484D:
+ case 0x4850:
+ case 0x4851:
+ case 0x4954:
+ case 0x4958:
+ case 0x4B43:
+ case 0x4B44:
+ case 0x4B45:
+ case 0x4B47:
+ case 0x4B48:
+ case 0x4B49:
+ case 0x4B4E:
+ case 0x4B50:
+ case 0x4B52:
+ case 0x4B54:
+ case 0x4B58:
+ case 0x4D50:
+ case 0x4D53:
+ case 0x4D56:
+ case 0x4D58:
+ case 0x5044:
+ case 0x5045:
+ case 0x5046:
+ case 0x5047:
+ case 0x5049:
+ case 0x504B:
+ case 0x504D:
+ case 0x5254:
+ case 0x5347:
+ case 0x5349:
+ case 0x534B:
+ case 0x534D:
+ case 0x5356:
+ case 0x5358:
+ case 0x5443:
+ case 0x544B:
+ case 0x5647:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
+int speed_idx_ep11(int req_type)
+{
+ switch (req_type) {
+ case 1:
+ case 2:
+ case 36:
+ case 37:
+ case 38:
+ case 39:
+ case 40:
+ return LOW;
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 26:
+ case 30:
+ case 31:
+ case 32:
+ case 33:
+ case 34:
+ case 35:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
+
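Two illustrative lookups through the speed index helpers above (the call sites are invented): a CCA request type is the two ASCII bytes of the CPRB function code packed into an int, and the returned LOW/MEDIUM/HIGH weight presumably feeds the per-queue load accounting introduced elsewhere in this patch:

	int w_cca  = speed_idx_cca(('P' << 8) | 'D');	/* 0x5044 ("PD") => HIGH */
	int w_ep11 = speed_idx_ep11(2);			/* EP11 request type 2 => LOW */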
/**
* Convert an ICAMEX message to a type6 MEX message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
.ulen = 10,
.only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
};
- static struct function_and_rules_block static_pke_fnr_MCL2 = {
- .function_code = {'P', 'K'},
- .ulen = 10,
- .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
- };
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pke_fnr_MCL2 : static_pke_fnr;
+ msg->fr = static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
/**
* Convert an ICACRT message to a type6 CRT message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
* Returns 0 on success or -EFAULT.
*/
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
.only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
};
- static struct function_and_rules_block static_pkd_fnr_MCL2 = {
- .function_code = {'P', 'D'},
- .ulen = 10,
- .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
- };
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
- msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
size - sizeof(msg->hdr) - sizeof(msg->cprbx);
- msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
- static_pkd_fnr_MCL2 : static_pkd_fnr;
+ msg->fr = static_pkd_fnr;
ap_msg->length = size;
return 0;
@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
/**
* Convert an XCRB message to a type6 CPRB message.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
@@ -297,9 +388,10 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2;
} __packed;
-static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ica_xcRB *xcRB)
+static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+ struct ica_xcRB *xcRB,
+ unsigned int *fcode,
+ unsigned short **dom)
{
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
memcpy(msg->hdr.function_code, function_code,
sizeof(msg->hdr.function_code));
+ *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
+ *dom = (unsigned short *)&msg->cprbx.domain;
+
if (memcmp(function_code, "US", 2) == 0)
ap_msg->special = 1;
else
@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
copy_from_user(req_data, xcRB->request_data_address,
xcRB->request_data_length))
return -EFAULT;
+
return 0;
}
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
- struct ap_message *ap_msg,
- struct ep11_urb *xcRB)
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
+ struct ep11_urb *xcRB,
+ unsigned int *fcode)
{
unsigned int lfmt;
-
static struct type6_hdr static_type6_ep11_hdr = {
.type = 0x06,
.rqid = {0x00, 0x01},
@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
unsigned char dom_tag; /* fixed value 0x4 */
unsigned char dom_len; /* fixed value 0x4 */
unsigned int dom_val; /* domain id */
- } __packed * payload_hdr;
+ } __packed * payload_hdr = NULL;
if (CEIL4(xcRB->req_len) < xcRB->req_len)
return -EINVAL; /* overflow after alignment*/
@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
return -EFAULT;
}
- /*
- The target domain field within the cprb body/payload block will be
- replaced by the usage domain for non-management commands only.
- Therefore we check the first bit of the 'flags' parameter for
- management command indication.
- 0 - non management command
- 1 - management command
- */
- if (!((msg->cprbx.flags & 0x80) == 0x80)) {
- msg->cprbx.target_id = (unsigned int)
- AP_QID_QUEUE(zdev->ap_dev->qid);
-
- if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
- switch (msg->pld_lenfmt & 0x03) {
- case 1:
- lfmt = 2;
- break;
- case 2:
- lfmt = 3;
- break;
- default:
- return -EINVAL;
- }
- } else {
- lfmt = 1; /* length format #1 */
- }
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
- payload_hdr->dom_val = (unsigned int)
- AP_QID_QUEUE(zdev->ap_dev->qid);
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
}
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ *fcode = payload_hdr->func_val & 0xFFFF;
+
return 0;
}
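A worked example of the payload length-format decoding above (the value 0x82 is invented; the invalid low-bit combinations that the real code rejects with -EINVAL are ignored here):

	unsigned char pld_lenfmt = 0x82;	/* bit 7 set: extended length format */
	unsigned int lfmt;

	if (pld_lenfmt & 0x80)
		lfmt = ((pld_lenfmt & 0x03) == 1) ? 2 : 3;	/* here: lfmt = 3 */
	else
		lfmt = 1;			/* plain length format #1 */
	/* the function/domain header is then read lfmt bytes past pld_lenfmt,
	 * which is where *fcode is picked up above */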
/**
* Copy results from a type 86 ICA reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @reply: reply AP message.
* @data: pointer to user output data
* @length: size of user output data
@@ -508,7 +590,7 @@ struct type86_ep11_reply {
struct ep11_cprb cprbx;
} __packed;
-static int convert_type86_ica(struct zcrypt_device *zdev,
+static int convert_type86_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode;
- if (service_rc == 8 && service_rs == 66)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 65)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 770)
+ if ((service_rc == 8 && service_rs == 66) ||
+ (service_rc == 8 && service_rs == 65) ||
+ (service_rc == 8 && service_rs == 72) ||
+ (service_rc == 8 && service_rs == 770) ||
+ (service_rc == 12 && service_rs == 769)) {
+ ZCRYPT_DBF(DBF_DEBUG,
+ "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EINVAL;
+ }
if (service_rc == 8 && service_rs == 783) {
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ zq->zcard->min_mod_size =
+ PCIXCC_MIN_MOD_SIZE_OLD;
+ ZCRYPT_DBF(DBF_DEBUG,
+ "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EAGAIN;
}
- if (service_rc == 12 && service_rs == 769)
- return -EINVAL;
- if (service_rc == 8 && service_rs == 72)
- return -EINVAL;
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
- msg->hdr.reply_code);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EAGAIN; /* repeat the request on a different device. */
}
data = msg->text;
@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
/**
* Copy results from a type 86 XCRB reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @reply: reply AP message.
* @xcRB: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
/**
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
* @reply: reply AP message.
* @xcRB: pointer to EP11 user request block
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ep11_urb *xcRB)
{
@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
return 0;
}
-static int convert_type86_rng(struct zcrypt_device *zdev,
+static int convert_type86_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *buffer)
{
@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
return msg->fmt2.count2;
}
-static int convert_response_ica(struct zcrypt_device *zdev,
+static int convert_response_ica(struct zcrypt_queue *zq,
struct ap_message *reply,
char __user *outputdata,
unsigned int outputdatalength)
{
struct type86x_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
(msg->cprbx.ccp_rscode == 0x14f) &&
(outputdatalength > 256)) {
- if (zdev->max_exp_bit_length <= 17) {
- zdev->max_exp_bit_length = 17;
+ if (zq->zcard->max_exp_bit_length <= 17) {
+ zq->zcard->max_exp_bit_length = 17;
return -EAGAIN;
} else
return -EINVAL;
}
if (msg->hdr.reply_code)
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_ica(zdev, reply,
+ return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
-static int convert_response_xcrb(struct zcrypt_device *zdev,
+static int convert_response_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
struct type86x_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *) reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(zdev, reply, xcRB);
+ return convert_type86_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
-static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ep11_urb *xcRB)
{
struct type86_ep11_reply *msg = reply->message;
- /* Response type byte is the second byte in the response. */
- switch (((unsigned char *)reply->message)[1]) {
+ switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE87_RSP_CODE:
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
- return convert_error(zdev, reply);
+ return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
- return convert_type86_ep11_xcrb(zdev, reply, xcRB);
+ return convert_type86_ep11_xcrb(zq, reply, xcRB);
/* Fall through, no break, incorrect cprb version is an unknown resp.*/
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
-static int convert_response_rng(struct zcrypt_device *zdev,
+static int convert_response_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *data)
{
@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev,
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_rng(zdev, reply, data);
+ return convert_type86_rng(zq, reply, data);
/* Fall through, no break, incorrect cprb version is an unknown
* response */
default: /* Unknown response type, this should NEVER EVER happen */
- zdev->online = 0;
- pr_err("Cryptographic device %x failed and was set offline\n",
- AP_QID_DEVICE(zdev->ap_dev->qid));
- ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev,
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -860,11 +966,11 @@ out:
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
struct ap_message *msg,
struct ap_message *reply)
{
@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
-static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex)
{
struct ap_message ap_msg;
@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+ rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg,
+ rc = convert_response_ica(zq, &ap_msg,
mex->outputdata,
mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
@@ -948,11 +1054,11 @@ out_free:
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @crt: pointer to the modexpo_crt request buffer
*/
-static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt)
{
struct ap_message ap_msg;
@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+ rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
+ ap_queue_message(zq->queue, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
rc = ap_msg.rc;
if (rc == 0)
- rc = convert_response_ica(zdev, &ap_msg,
+ rc = convert_response_ica(zq, &ap_msg,
crt->outputdata,
crt->outputdatalength);
- } else
+ } else {
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, &ap_msg);
+ }
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
+unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned short **dom)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+ int rc;
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+ rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+ if (rc) {
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ }
+ return rc;
+}
+
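A hypothetical caller sketch of the new prepare/send split for CPRB requests; the two signatures are the ones added in this patch, but zq, xcRB and the surrounding control flow are placeholders (in the real code the send step is reached through the zcrypt_ops table, not by a direct call):

	struct ap_message ap_msg;
	unsigned int func_code;
	unsigned short *dom;
	int rc;

	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &dom);
	if (rc)
		return rc;
	/* ... the distributor picks a struct zcrypt_queue *zq using func_code ... */
	rc = zcrypt_msgtype6_send_cprb(zq, xcRB, &ap_msg);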
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to handle a send_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @xcRB: pointer to the send_cprb request buffer
*/
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
- struct ica_xcRB *xcRB)
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
+ struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
int rc;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
+ rc = convert_response_xcrb(zq, ap_msg, xcRB);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
- kzfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ return rc;
+}
+
+unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_EP11,
+ };
+ int rc;
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+ rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+ if (rc) {
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
+ }
return rc;
}
/**
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor
* @xcRB: pointer to the ep11 user request block
*/
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
- struct ep11_urb *xcrb)
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+ struct ep11_urb *xcrb,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_EP11,
- };
int rc;
+ unsigned int lfmt;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->message;
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr = NULL;
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive_ep11;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
- if (rc)
- goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+
+ /**
+ * The target domain field within the cprb body/payload block will be
+ * replaced by the usage domain for non-management commands only.
+ * Therefore we check the first bit of the 'flags' parameter for
+ * management command indication.
+ * 0 - non management command
+ * 1 - management command
+ */
+ if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+ msg->cprbx.target_id = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+
+ if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr->dom_val = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+ }
+
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
+ rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
-out_free:
- kzfree(ap_msg.message);
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
return rc;
}
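Reduced to its essence, the management-command check above tests a single flag bit; a tiny illustrative snippet (the value is invented):

	unsigned char flags = 0x00;	/* bit 7 clear: not a management command */

	if (!(flags & 0x80)) {
		/* only in this case are cprbx.target_id and the payload's
		 * dom_val overwritten with the queue's usage domain */
	}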
+unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+ unsigned int *domain)
+{
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+
+ ap_init_message(ap_msg);
+ ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->message)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private) {
+ kzfree(ap_msg->message);
+ return -ENOMEM;
+ }
+ memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+
+ rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+ *func_code = HWRNG;
+ return 0;
+}
+
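The corresponding sketch for the random-number path (again hypothetical; the real send step goes through the zcrypt_ops table): get_rng_fc() builds the type 6 CPRB into the AP message and sets the function code to HWRNG, and zcrypt_msgtype6_rng() later patches in the selected queue's domain before queueing it:

	struct ap_message ap_msg;
	unsigned int domain;
	int func_code, rc;

	rc = get_rng_fc(&ap_msg, &func_code, &domain);	/* func_code == HWRNG */
	if (rc)
		return rc;
	/* ... a zcrypt_queue *zq and an output buffer are chosen ... */
	rc = zcrypt_msgtype6_rng(zq, buffer, &ap_msg);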
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
-
-static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
- char *buffer)
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+ char *buffer, struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
- };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg = ap_msg->message;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
int rc;
- ap_init_message(&ap_msg);
- ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
- init_completion(&resp_type.work);
- ap_queue_message(zdev->ap_dev, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+ init_completion(&rtype->work);
+ ap_queue_message(zq->queue, ap_msg);
+ rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_rng(zdev, &ap_msg, buffer);
+ rc = convert_response_rng(zq, ap_msg, buffer);
} else
/* Signal pending. */
- ap_cancel_message(zdev->ap_dev, &ap_msg);
- kfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+
+ kzfree(ap_msg->message);
+ kzfree(ap_msg->private);
return rc;
}
@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
};
-int __init zcrypt_msgtype6_init(void)
+void __init zcrypt_msgtype6_init(void)
{
zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
- return 0;
}
void __exit zcrypt_msgtype6_exit(void)
@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void)
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
-
-module_init(zcrypt_msgtype6_init);
-module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 207247570623..7a0d5b57821f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */
} __packed;
+unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+ unsigned int *, unsigned short **);
+unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+ unsigned int *);
+unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW 10
+#define MEDIUM 100
+#define HIGH 500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
/**
* Prepare a type6 CPRB message for random number generation
*
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
-static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
- struct ap_message *ap_msg,
- unsigned random_number_length)
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+ unsigned int random_number_length,
+ unsigned int *domain)
{
struct {
struct type6_hdr hdr;
@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
msg->hdr.FromCardLen2 = random_number_length,
msg->cprbx = local_cprbx;
msg->cprbx.rpl_datal = random_number_length,
- msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof(*msg);
+ *domain = (unsigned short)msg->cprbx.domain;
}
-int zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_init(void);
void zcrypt_msgtype6_exit(void);
#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index df8f0c4dacb7..600604782b65 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -31,7 +31,8 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -46,11 +47,6 @@
#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define PCIXCC_MCL2_SPEED_RATING 7870
-#define PCIXCC_MCL3_SPEED_RATING 7870
-#define CEX2C_SPEED_RATING 7000
-#define CEX3C_SPEED_RATING 6500
-
#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
@@ -67,142 +63,34 @@ struct response_type {
#define PCIXCC_RESPONSE_TYPE_ICA 0
#define PCIXCC_RESPONSE_TYPE_XCRB 1
-static struct ap_device_id zcrypt_pcixcc_ids[] = {
- { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
- { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_pcixcc_driver = {
- .probe = zcrypt_pcixcc_probe,
- .remove = zcrypt_pcixcc_remove,
- .ids = zcrypt_pcixcc_ids,
- .request_timeout = PCIXCC_CLEANUP_TIME,
+static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_PCIXCC,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
};
-/**
- * Micro-code detection function. Its sends a message to a pcixcc card
- * to find out the microcode level.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
-{
- static unsigned char msg[] = {
- 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
- 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
- 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
- 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
- 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
- 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
- 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
- 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
- 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
- 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
- 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
- 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
- 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
- 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
- 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
- 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
- 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
- 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
- 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
- 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
- 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
- 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
- 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
- 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
- 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
- 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
- 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
- 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
- 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
- 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
- 0xF1,0x3D,0x93,0x53
- };
- unsigned long long psmid;
- struct CPRBX *cprbx;
- char *reply;
- int rc, i;
-
- reply = (void *) get_zeroed_page(GFP_KERNEL);
- if (!reply)
- return -ENOMEM;
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
- rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
- if (rc)
- goto out_free;
-
- /* Wait for the test message to complete. */
- for (i = 0; i < 6; i++) {
- msleep(300);
- rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
- if (rc == 0 && psmid == 0x0102030405060708ULL)
- break;
- }
-
- if (i >= 6) {
- /* Got no answer. */
- rc = -ENODEV;
- goto out_free;
- }
+static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_PCIXCC,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
- cprbx = (struct CPRBX *) (reply + 48);
- if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
- rc = ZCRYPT_PCIXCC_MCL2;
- else
- rc = ZCRYPT_PCIXCC_MCL3;
-out_free:
- free_page((unsigned long) reply);
- return rc;
-}
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
/**
* Large random number detection function. It sends a message to a pcixcc
@@ -211,15 +99,25 @@ out_free:
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
-static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
{
struct ap_message ap_msg;
unsigned long long psmid;
+ unsigned int domain;
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *reply;
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg;
int rc, i;
ap_init_message(&ap_msg);
@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
if (!ap_msg.message)
return -ENOMEM;
- rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
- rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
+ rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+ msg = ap_msg.message;
+ msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+ rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
if (rc)
goto out_free;
@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
- rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
+ rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
@@ -258,110 +160,168 @@ out_free:
}
/**
- * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
- * since the bus_match already checked the hardware type. The PCIXCC
- * cards come in two flavours: micro code level 2 and micro code level 3.
- * This is checked by sending a test message to the device.
- * @ap_dev: pointer to the AP device.
+ * Probe function for PCIXCC/CEX2C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP card device.
*/
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev;
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2C_SPEED_IDX[] = {
+ 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+ static const int CEX3C_SPEED_IDX[] = {
+ 500, 700, 1400, 550, 800, 1500, 80, 10};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
int rc = 0;
- zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
- if (!zdev)
+ zc = zcrypt_card_alloc();
+ if (!zc)
return -ENOMEM;
- zdev->ap_dev = ap_dev;
- zdev->online = 1;
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_PCIXCC:
- rc = zcrypt_pcixcc_mcl(ap_dev);
- if (rc < 0) {
- zcrypt_device_free(zdev);
- return rc;
- }
- zdev->user_space_type = rc;
- if (rc == ZCRYPT_PCIXCC_MCL2) {
- zdev->type_string = "PCIXCC_MCL2";
- zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
- } else {
- zdev->type_string = "PCIXCC_MCL3";
- zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
- }
- break;
+ zc->card = ac;
+ ac->private = zc;
+ switch (ac->ap_dev.device_type) {
case AP_DEVICE_TYPE_CEX2C:
- zdev->user_space_type = ZCRYPT_CEX2C;
- zdev->type_string = "CEX2C";
- zdev->speed_rating = CEX2C_SPEED_RATING;
- zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
- zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+ zc->user_space_type = ZCRYPT_CEX2C;
+ zc->type_string = "CEX2C";
+ memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
+ sizeof(CEX2C_SPEED_IDX));
+ zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+ zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3C:
- zdev->user_space_type = ZCRYPT_CEX3C;
- zdev->type_string = "CEX3C";
- zdev->speed_rating = CEX3C_SPEED_RATING;
- zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
- zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
- zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->type_string = "CEX3C";
+ memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
+ sizeof(CEX3C_SPEED_IDX));
+ zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
break;
default:
- goto out_free;
+ zcrypt_card_free(zc);
+ return -ENODEV;
}
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+
+ return rc;
+}
- rc = zcrypt_pcixcc_rng_supported(ap_dev);
+/**
+ * This is called to remove the PCIXCC/CEX2C card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_pcixcc_card_driver = {
+ .probe = zcrypt_pcixcc_card_probe,
+ .remove = zcrypt_pcixcc_card_remove,
+ .ids = zcrypt_pcixcc_card_ids,
+};
+
+/**
+ * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
+
+ zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ rc = zcrypt_pcixcc_rng_supported(aq);
if (rc < 0) {
- zcrypt_device_free(zdev);
+ zcrypt_queue_free(zq);
return rc;
}
if (rc)
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
else
- zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_NORNG);
- ap_device_init_reply(ap_dev, &zdev->reply);
- ap_dev->private = zdev;
- rc = zcrypt_device_register(zdev);
- if (rc)
- goto out_free;
- return 0;
-
- out_free:
- ap_dev->private = NULL;
- zcrypt_msgtype_release(zdev->ops);
- zcrypt_device_free(zdev);
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = PCIXCC_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
return rc;
}
/**
- * This is called to remove the extended PCIXCC/CEX2C driver information
- * if an AP device is removed.
+ * This is called to remove the PCIXCC/CEX2C queue driver information
+ * if an AP queue device is removed.
*/
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
+static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
{
- struct zcrypt_device *zdev = ap_dev->private;
- struct zcrypt_ops *zops = zdev->ops;
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
- zcrypt_device_unregister(zdev);
- zcrypt_msgtype_release(zops);
+ ap_queue_remove(aq);
+ if (zq)
+ zcrypt_queue_unregister(zq);
}
+static struct ap_driver zcrypt_pcixcc_queue_driver = {
+ .probe = zcrypt_pcixcc_queue_probe,
+ .remove = zcrypt_pcixcc_queue_remove,
+ .suspend = ap_queue_suspend,
+ .resume = ap_queue_resume,
+ .ids = zcrypt_pcixcc_queue_ids,
+};
+
int __init zcrypt_pcixcc_init(void)
{
- return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
+ THIS_MODULE, "pcixcccard");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
+ THIS_MODULE, "pcixccqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+
+ return rc;
}
void zcrypt_pcixcc_exit(void)
{
- ap_driver_unregister(&zcrypt_pcixcc_driver);
+ ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
+ ap_driver_unregister(&zcrypt_pcixcc_card_driver);
}
module_init(zcrypt_pcixcc_init);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 000000000000..a303f3b2c328
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,226 @@
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto queue devices.
+ */
+
+static ssize_t zcrypt_queue_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+}
+
+static ssize_t zcrypt_queue_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_card *zc = zq->zcard;
+ int online;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ if (online && !zc->online)
+ return -EINVAL;
+ zq->online = online;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ online);
+
+ if (!online)
+ ap_flush_queue(zq->queue);
+ return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
+ zcrypt_queue_online_store);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+ &dev_attr_online.attr,
+ NULL,
+};
+
+static struct attribute_group zcrypt_queue_attr_group = {
+ .attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+ zq->online = online;
+ if (!online)
+ ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+ struct zcrypt_queue *zq;
+
+ zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+ if (!zq)
+ return NULL;
+ zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+ if (!zq->reply.message)
+ goto out_free;
+ zq->reply.length = max_response_size;
+ INIT_LIST_HEAD(&zq->list);
+ kref_init(&zq->refcount);
+ return zq;
+
+out_free:
+ kfree(zq);
+ return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+ kfree(zq->reply.message);
+ kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+ struct zcrypt_queue *zq =
+ container_of(kref, struct zcrypt_queue, refcount);
+ zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+ kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+ return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+ int rc;
+
+ spin_lock(&zcrypt_list_lock);
+ zc = zq->queue->card->private;
+ zcrypt_card_get(zc);
+ zq->zcard = zc;
+ zq->online = 1; /* New devices are online by default. */
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ list_add_tail(&zq->list, &zc->zqueues);
+ zcrypt_device_count++;
+ spin_unlock(&zcrypt_list_lock);
+
+ rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ if (rc)
+ goto out;
+ get_device(&zq->queue->ap_dev.device);
+
+ if (zq->ops->rng) {
+ rc = zcrypt_rng_device_add();
+ if (rc)
+ goto out_unregister;
+ }
+ return 0;
+
+out_unregister:
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ put_device(&zq->queue->ap_dev.device);
+out:
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ spin_unlock(&zcrypt_list_lock);
+ zcrypt_card_put(zc);
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
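For reference, the zcrypt_pcixcc_queue_probe() hunk earlier in this patch is the intended caller of this API; the sketch below restates that calling pattern in minimal form. MY_MAX_MSG_SIZE and my_queue_probe() are illustrative names only, and the usual driver includes ("ap_bus.h", "zcrypt_api.h" and a msgtype header) are assumed.

	/* Minimal sketch of a queue probe using zcrypt_queue_alloc/register. */
	static int my_queue_probe(struct ap_device *ap_dev)
	{
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);
		struct zcrypt_queue *zq;
		int rc;

		zq = zcrypt_queue_alloc(MY_MAX_MSG_SIZE);	/* reply buffer size */
		if (!zq)
			return -ENOMEM;
		zq->queue = aq;
		zq->online = 1;
		atomic_set(&zq->load, 0);
		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
		ap_queue_init_reply(aq, &zq->reply);
		aq->private = zq;
		rc = zcrypt_queue_register(zq);	/* links zq to its zcrypt_card */
		if (rc) {
			aq->private = NULL;
			zcrypt_queue_free(zq);
		}
		return rc;
	}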
+
+/**
+ * zcrypt_queue_unregister(): Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ zc = zq->zcard;
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ zcrypt_device_count--;
+ spin_unlock(&zcrypt_list_lock);
+ zcrypt_card_put(zc);
+ if (zq->ops->rng)
+ zcrypt_rng_device_remove();
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 2981024a2438..3f85b97ab8d2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -62,7 +62,7 @@
#include <net/dst.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 581001989937..d5bf36ec8a75 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
/**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
* @tag: identifier for event
* @erp: erp_action running
*/
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
- debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+ zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
* zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
* @tag: identifier for event
* @wka_port: well known address port
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 36d07584271d..db186d44cfaf 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
*/
#ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
struct zfcp_dbf_scsi scsi_buf;
};
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if the FCP response carries only a benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+ struct fsf_qtcb *qtcb = req->qtcb;
+ u32 fsf_stat = qtcb->header.fsf_status;
+ struct fcp_resp *fcp_rsp;
+ u8 rsp_flags, fr_status;
+
+ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+ return false; /* not an FCP response */
+ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+ rsp_flags = fcp_rsp->fr_flags;
+ fr_status = fcp_rsp->fr_status;
+ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+ (rsp_flags == FCP_RESID_UNDER) &&
+ (fr_status == SAM_STAT_GOOD);
+}
+
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+ zfcp_dbf_hba_fsf_resp("fs_ferr",
+ zfcp_dbf_hba_fsf_resp_suppress(req)
+ ? 5 : 1, req);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
}
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+ struct zfcp_fsf_req *fsf_req)
+{
+ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index a59d678125bd..7ccfce559034 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
}
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+ unsigned long flags;
+ struct zfcp_adapter *adapter = port->adapter;
+ int port_status;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct scsi_device *sdev;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ port_status = atomic_read(&port->status);
+ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+ /* new ERP of severity >= port triggered elsewhere meanwhile or
+ * local link down (adapter erp_failed but not clear unblock)
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ spin_lock(shost->host_lock);
+ __shost_for_each_device(sdev, shost) {
+ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+ int lun_status;
+
+ if (zsdev->port != port)
+ continue;
+ /* LUN under port of interest */
+ lun_status = atomic_read(&zsdev->status);
+ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+ continue; /* unblock rport despite failed LUNs */
+ /* LUN recovery not given up yet [maybe follow-up pending] */
+ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+ /* LUN blocked:
+ * not yet unblocked [LUN recovery pending]
+ * or meanwhile blocked [new LUN recovery triggered]
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ }
+ /* now port has no child or all children have completed recovery,
+ * and no ERP of severity >= port was meanwhile triggered elsewhere
+ */
+ zfcp_scsi_schedule_rport_register(port);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
+ zfcp_erp_try_rport_unblock(port);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
*/
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ zfcp_erp_try_rport_unblock(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 968a0ab4b398..9afdbc32b23f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ struct zfcp_erp_action *erp);
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index be1c04b334c5..ea3c76ac0de1 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef FSF_H
@@ -78,6 +78,7 @@
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 7c2c6194dfca..703fce59befe 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
*/
#ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
spin_unlock_irqrestore(&rl->lock, flags);
}
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ * function will be the target-request; the second parameter is the same
+ * pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+ void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+ struct zfcp_fsf_req *req;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_for_each_entry(req, &rl->buckets[i], list)
+ f(req, data);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
#endif /* ZFCP_REQLIST_H */
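The zfcp_scsi_forget_cmnds() hunk further below is the caller this helper was added for. Purely to illustrate the callback contract (rl->lock is held for the whole walk and the callback must neither sleep nor modify the list), a hypothetical counting walk could look like the sketch below; my_count_req() and my_count_pending() are invented names, not part of the patch.

	/* Count all pending FSF requests of an adapter. */
	static void my_count_req(struct zfcp_fsf_req *req, void *data)
	{
		unsigned int *count = data;	/* called under rl->lock: no sleeping, no list changes */

		(*count)++;
	}

	static unsigned int my_count_pending(struct zfcp_adapter *adapter)
	{
		unsigned int count = 0;

		zfcp_reqlist_apply_for_all(adapter->req_list, my_count_req, &count);
		return count;
	}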
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9069f98a1817..07ffdbb5107f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
- /* This could be either
- * open LUN pending: this is temporary, will result in
- * open LUN or ERP_FAILED, so retry command
+ /* This could be
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
return retval;
}
+struct zfcp_scsi_req_filter {
+ u8 tmf_scope;
+ u32 lun_handle;
+ u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+ struct zfcp_scsi_req_filter *filter =
+ (struct zfcp_scsi_req_filter *)data;
+
+ /* already aborted - prevent side-effects - or not a SCSI command */
+ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ return;
+
+ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+ if (old_req->qtcb->header.port_handle != filter->port_handle)
+ return;
+
+ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+ old_req->qtcb->header.lun_handle != filter->lun_handle)
+ return;
+
+ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+ old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+ struct zfcp_adapter *adapter = zsdev->port->adapter;
+ struct zfcp_scsi_req_filter filter = {
+ .tmf_scope = FCP_TMF_TGT_RESET,
+ .port_handle = zsdev->port->handle,
+ };
+ unsigned long flags;
+
+ if (tm_flags == FCP_TMF_LUN_RESET) {
+ filter.tmf_scope = FCP_TMF_LUN_RESET;
+ filter.lun_handle = zsdev->lun_handle;
+ }
+
+ /*
+ * abort_lock protects (struct zfcp_fsf_req *)->data against concurrent
+ * access from the abort handler and the normal command handler.
+ */
+ write_lock_irqsave(&adapter->abort_lock, flags);
+ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+ &filter);
+ write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
- } else
+ } else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ }
zfcp_fsf_req_free(fsf_req);
return retval;
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 33fbe8249fd5..04efed171c88 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -17,7 +17,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h> /* put_/get_user */
+#include <linux/uaccess.h> /* put_/get_user */
#include <asm/io.h>
#include <asm/display7seg.h>
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 5609b602c54d..56e962a01493 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -29,7 +29,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/envctrl.h>
#include <asm/io.h>
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 206ef4232adf..216f923161d1 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -15,7 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/upa.h>
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a40ee1e37486..6ff61dad5e21 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -37,7 +37,7 @@
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pcic.h>
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 4612691c6619..2c2e6a3b4c7e 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -40,7 +40,7 @@
#include <linux/fs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/openpromio.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91..00e7968a1d70 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,8 +1,8 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
Note: This version of the driver does not contain a bundled firmware
image.
@@ -95,7 +92,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 0fdc83cfa0e1..b6c208cc474f 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,8 +1,8 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
- Modifications By: Tom Couch <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
+ Modifications By: Tom Couch
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
Copyright (C) 2010 LSI Corporation.
@@ -41,10 +41,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714..b150e131b2e7 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1,7 +1,7 @@
/*
3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -43,10 +43,7 @@
LSI 3ware 9750 6Gb/s SAS/SATA-RAID
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
History
-------
@@ -67,7 +64,7 @@
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index fec6449c7595..05e77d84c16d 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -1,7 +1,7 @@
/*
3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Copyright (C) 2009 LSI Corporation.
@@ -39,10 +39,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
-
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
*/
#ifndef _3W_SAS_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba1613e21..33261b690774 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,7 +1,7 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -47,10 +47,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
- For more information, goto:
- http://www.lsi.com
+ aradford@gmail.com
+
History
-------
@@ -211,7 +210,7 @@
#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 6f65e663d393..69e80c1ed1ca 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,7 +1,7 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@lsi.com>
+ Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@lsi.com
+
+ aradford@gmail.com
For more information, goto:
http://www.lsi.com
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index dfa93347c752..a4f6b0d95515 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a2d03957cbe2..736b77414a4b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI) += libiscsi.o qedi/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d849ffa378b1..4f5ca794bb71 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -97,9 +97,6 @@
* and macros and include this file in your driver.
*
* These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
@@ -127,9 +124,7 @@
* NCR5380_dma_residual - residual byte count
*
* The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
*/
#ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
}
#endif
-
-static int probe_irq;
-
-/**
- * probe_intr - helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
- probe_irq = irq;
- return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq - find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
- int possible)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long timeout;
- int trying_irqs, i, mask;
-
- for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
- trying_irqs |= mask;
-
- timeout = jiffies + msecs_to_jiffies(250);
- probe_irq = NO_IRQ;
-
- /*
- * A interrupt is triggered whenever BSY = false, SEL = true
- * and a bit set in the SELECT_ENABLE_REG is asserted on the
- * SCSI bus.
- *
- * Note that the bus is only driven when the phase control signals
- * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
- * to zero.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
- while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
- schedule_timeout_uninterruptible(1);
-
- NCR5380_write(SELECT_ENABLE_REG, 0);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
- if (trying_irqs & mask)
- free_irq(i, NULL);
-
- return probe_irq;
-}
-
/**
* NCR58380_info - report driver and host information
* @instance: relevant scsi host instance
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 3c6ce5434449..51a3567a6fb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,16 +199,6 @@
#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
-/*
- * These are "special" values for the irq and dma_channel fields of the
- * Scsi_Host structure
- */
-
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
-
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
static int NCR5380_init(struct Scsi_Host *instance, int flags);
static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
static void NCR5380_exit(struct Scsi_Host *instance);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6678d1fd897b..1ee7c654f7b8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -32,7 +32,7 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 5648b715fed9..e1daff230c7d 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,7 +41,7 @@
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e4f3e22fcbd9..3ecbf20ca29f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
- { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 2e3117aa382f..21ac265280bf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -254,7 +254,7 @@ main(int argc, char *argv[])
argv += optind;
if (argc != 1) {
- fprintf(stderr, "%s: No input file specifiled\n", appname);
+ fprintf(stderr, "%s: No input file specified\n", appname);
usage();
/* NOTREACHED */
}
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 9e45749d55ed..af032c46ec0e 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -61,7 +61,7 @@
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 9d253cb83ee7..5caf5f3ff642 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -27,7 +27,7 @@
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/fcntl.h>
#include "bfad_drv.h"
@@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index f9e862093a25..cfcfff48e8e1 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -58,7 +58,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.2.25.0"
+#define BFAD_DRIVER_VERSION "3.2.25.1"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 0990130821fa..c639d5a02656 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -127,13 +127,6 @@ module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
-static int bnx2fc_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu);
-/* notification function for CPU hotplug events */
-static struct notifier_block bnx2fc_cpu_notifier = {
- .notifier_call = bnx2fc_cpu_callback,
-};
-
static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
{
return ((struct bnx2fc_interface *)
@@ -2622,37 +2615,19 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
-/**
- * bnx2fc_cpu_callback - Handler for CPU hotplug events
- *
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for fcoe
- *
- * Returns NOTIFY_OK always.
- */
-static int bnx2fc_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+
+static int bnx2fc_cpu_online(unsigned int cpu)
{
- unsigned cpu = (unsigned long)hcpu;
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ return 0;
+}
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- printk(PFX "CPU %x online: Create Rx thread\n", cpu);
- bnx2fc_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2fc_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+static int bnx2fc_cpu_dead(unsigned int cpu)
+{
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ return 0;
}
static int bnx2fc_slave_configure(struct scsi_device *sdev)
@@ -2664,6 +2639,8 @@ static int bnx2fc_slave_configure(struct scsi_device *sdev)
return 0;
}
+static enum cpuhp_state bnx2fc_online_state;
+
/**
* bnx2fc_mod_init - module init entry point
*
@@ -2724,21 +2701,31 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock);
}
- cpu_notifier_register_begin();
+ get_online_cpus();
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
bnx2fc_percpu_thread_create(cpu);
- }
- /* Initialize per CPU interrupt thread */
- __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "scsi/bnx2fc:online",
+ bnx2fc_cpu_online, NULL);
+ if (rc < 0)
+ goto stop_threads;
+ bnx2fc_online_state = rc;
- cpu_notifier_register_done();
+ cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
+ NULL, bnx2fc_cpu_dead);
+ put_online_cpus();
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
return 0;
+stop_threads:
+ for_each_online_cpu(cpu)
+ bnx2fc_percpu_thread_destroy(cpu);
+ put_online_cpus();
+ kthread_stop(l2_thread);
free_wq:
destroy_workqueue(bnx2fc_wq);
release_bt:
@@ -2797,16 +2784,16 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread)
kthread_stop(l2_thread);
- cpu_notifier_register_begin();
-
+ get_online_cpus();
/* Destroy per cpu threads */
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_destroy(cpu);
}
- __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+ cpuhp_remove_state_nocalls(bnx2fc_online_state);
+ cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
- cpu_notifier_register_done();
+ put_online_cpus();
destroy_workqueue(bnx2fc_wq);
/*
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index c8b410c24cf0..86afc002814c 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -70,14 +70,6 @@ u64 iscsi_error_mask = 0x00;
DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
-static int bnx2i_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu);
-/* notification function for CPU hotplug events */
-static struct notifier_block bnx2i_cpu_notifier = {
- .notifier_call = bnx2i_cpu_callback,
-};
-
-
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
@@ -461,41 +453,21 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
kthread_stop(thread);
}
-
-/**
- * bnx2i_cpu_callback - Handler for CPU hotplug events
- *
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for iSCSI
- *
- * Returns NOTIFY_OK always.
- */
-static int bnx2i_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bnx2i_cpu_online(unsigned int cpu)
{
- unsigned cpu = (unsigned long)hcpu;
+ pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
+ bnx2i_percpu_thread_create(cpu);
+ return 0;
+}
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
- cpu);
- bnx2i_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
- bnx2i_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+static int bnx2i_cpu_dead(unsigned int cpu)
+{
+ pr_info("CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2i_percpu_thread_destroy(cpu);
+ return 0;
}
+static enum cpuhp_state bnx2i_online_state;
/**
* bnx2i_mod_init - module init entry point
@@ -539,18 +511,28 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL;
}
- cpu_notifier_register_begin();
+ get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_create(cpu);
- /* Initialize per CPU interrupt thread */
- __register_hotcpu_notifier(&bnx2i_cpu_notifier);
-
- cpu_notifier_register_done();
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "scsi/bnx2i:online",
+ bnx2i_cpu_online, NULL);
+ if (err < 0)
+ goto remove_threads;
+ bnx2i_online_state = err;
+ cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
+ NULL, bnx2i_cpu_dead);
+ put_online_cpus();
return 0;
+remove_threads:
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_destroy(cpu);
+ put_online_cpus();
+ cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport);
out:
@@ -587,14 +569,14 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
- cpu_notifier_register_begin();
+ get_online_cpus();
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
- __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
-
- cpu_notifier_register_done();
+ cpuhp_remove_state_nocalls(bnx2i_online_state);
+ cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
+ put_online_cpus();
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9e6f647ff1c1..9a2fdc305cf2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
csk, &req->local_ip, ntohs(req->local_port),
&req->peer_ip, ntohs(req->peer_port),
csk->atid, csk->rss_qid);
+ } else {
+ struct cpl_t6_act_open_req *req =
+ (struct cpl_t6_act_open_req *)skb->head;
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_atid));
+ req->local_port = csk->saddr.sin_port;
+ req->peer_port = csk->daddr.sin_port;
+ req->local_ip = csk->saddr.sin_addr.s_addr;
+ req->peer_ip = csk->daddr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
+ cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+ req->rsvd = cpu_to_be32(isn);
+
+ opt2 |= T5_ISS_VALID;
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+ csk, &req->local_ip, ntohs(req->local_port),
+ &req->peer_ip, ntohs(req->peer_port),
+ csk->atid, csk->rss_qid);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
- (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+ (&csk->saddr), (&csk->daddr),
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
csk->state, csk->flags, csk->atid, csk->rss_qid);
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
- int t4 = is_t4(lldi->adapter_type);
int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
unsigned long long opt0;
unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F |
- RX_FC_DISABLE_F |
RSS_QUEUE_V(csk->rss_qid);
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req6 *req =
(struct cpl_act_open_req6 *)skb->head;
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req6 *req =
(struct cpl_t5_act_open_req6 *)skb->head;
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
+ } else {
+ struct cpl_t6_act_open_req6 *req =
+ (struct cpl_t6_act_open_req6 *)skb->head;
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_atid));
+ req->local_port = csk->saddr6.sin6_port;
+ req->peer_port = csk->daddr6.sin6_port;
+ req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+ req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+ 8);
+ req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+ req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+ 8);
+ req->opt0 = cpu_to_be64(opt0);
+
+ opt2 |= RX_FC_DISABLE_F;
+ opt2 |= T5_OPT_2_VALID_F;
+
+ req->opt2 = cpu_to_be32(opt2);
+
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+ csk->cdev->ports[csk->port_id],
+ csk->l2t)));
+
+ req->rsvd2 = cpu_to_be32(0);
+ req->opt3 = cpu_to_be32(0);
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
- t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+ CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+ csk->flags, csk->atid,
&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
atid, tid, csk, csk->state, csk->flags, rcv_isn);
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
cxgbi_sock_get(csk);
csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
if (is_neg_adv(status))
goto rel_skb;
- module_put(THIS_MODULE);
+ module_put(cdev->owner);
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *lskb;
+ u32 tid = GET_TID(cpl);
+ u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+ csk, csk->state, csk->flags, csk->tid, skb,
+ skb->len, pdu_len_ddp);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+ cxgbi_skcb_flags(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cpl));
+ __pskb_trim(skb, ntohs(cpl->len));
+
+ if (!csk->skb_ulp_lhdr)
+ csk->skb_ulp_lhdr = skb;
+
+ lskb = csk->skb_ulp_lhdr;
+ cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+ csk, csk->state, csk->flags, skb, lskb);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ spin_unlock_bh(&csk->lock);
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+ struct sk_buff *skb, u32 ddpvld)
+{
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+ csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+ }
+
+ if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+ }
+
+ if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+ !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+ log_debug(1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+ csk, skb, ddpvld);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+ }
+}
+
static void do_rx_data_ddp(struct cxgbi_device *cdev,
struct sk_buff *skb)
{
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
unsigned int tid = GET_TID(rpl);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct tid_info *t = lldi->tids;
- unsigned int status = ntohl(rpl->ddpvld);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
csk = lookup_tid(t, tid);
if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
- csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
spin_lock_bh(&csk->lock);
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
- if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
- pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
- csk, lskb, status, cxgbi_skcb_flags(lskb));
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
- }
- if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
- }
- if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
- !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
- log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
- csk, lskb, status);
- cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
- }
+ cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
__kfree_skb(skb);
}
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+ struct sk_buff *data_skb = NULL;
+ u32 tid = GET_TID(rpl);
+ u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+ u32 seq = be32_to_cpu(rpl->seq);
+ u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+ "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+ "pdu_len_ddp %u, status %u.\n",
+ csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+ ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+ spin_lock_bh(&csk->lock);
+
+ if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+ log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+ "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+ csk, csk->state, csk->flags, csk->tid);
+
+ if (csk->state != CTP_ABORTING)
+ goto abort_conn;
+ else
+ goto discard;
+ }
+
+ cxgbi_skcb_tcp_seq(skb) = seq;
+ cxgbi_skcb_flags(skb) = 0;
+ cxgbi_skcb_rx_pdulen(skb) = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*rpl));
+ __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+ csk->rcv_nxt = seq + pdu_len_ddp;
+
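+ /* If a data PDU for this command is already queued, it is
+ * unlinked below and re-queued behind this completion skb so
+ * the header is processed before the payload.
+ */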
+ if (csk->skb_ulp_lhdr) {
+ data_skb = skb_peek(&csk->receive_queue);
+ if (!data_skb ||
+ !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+ pr_err("Error! freelist data not found 0x%p, tid %u\n",
+ data_skb, tid);
+
+ goto abort_conn;
+ }
+ __skb_unlink(data_skb, &csk->receive_queue);
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+ __skb_queue_tail(&csk->receive_queue, skb);
+ __skb_queue_tail(&csk->receive_queue, data_skb);
+ } else {
+ __skb_queue_tail(&csk->receive_queue, skb);
+ }
+
+ csk->skb_ulp_lhdr = NULL;
+
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+ cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+ cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+ cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+ log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+ csk, skb, cxgbi_skcb_flags(skb));
+
+ cxgbi_conn_pdu_ready(csk);
+ spin_unlock_bh(&csk->lock);
+
+ return;
+
+abort_conn:
+ send_abort_req(csk);
+discard:
+ spin_unlock_bh(&csk->lock);
+rel_skb:
+ __kfree_skb(skb);
+}
+
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
void *daddr;
unsigned int step;
unsigned int size, size6;
- int t4 = is_t4(lldi->adapter_type);
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
- if (t4) {
+ if (is_t4(lldi->adapter_type)) {
size = sizeof(struct cpl_act_open_req);
size6 = sizeof(struct cpl_act_open_req6);
- } else {
+ } else if (is_t5(lldi->adapter_type)) {
size = sizeof(struct cpl_t5_act_open_req);
size6 = sizeof(struct cpl_t5_act_open_req6);
+ } else {
+ size = sizeof(struct cpl_t6_act_open_req);
+ size6 = sizeof(struct cpl_t6_act_open_req6);
}
if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu = dst_mtu(csk->dst);
cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
csk->tx_chan = cxgb4_port_chan(ndev);
- /* SMT two entries per row */
- csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+ cxgb4_port_viid(ndev));
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu, csk->mss_idx, csk->smac_idx);
/* must wait for either a act_open_rpl or act_open_establish */
- try_module_get(THIS_MODULE);
+ if (!try_module_get(cdev->owner)) {
+ pr_err("%s, try_module_get failed.\n", ndev->name);
+ goto rel_resource;
+ }
+
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
if (csk->csk_family == AF_INET)
send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
[CPL_FW4_ACK] = do_fw4_ack,
[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
- [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+ [CPL_ISCSI_DATA] = do_rx_iscsi_data,
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
[CPL_RX_DATA] = do_rx_data,
};
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->nports = lldi->nports;
cdev->mtus = lldi->mtus;
cdev->nmtus = NMTUS;
- cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+ cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+ CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
+ cdev->owner = THIS_MODULE;
cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
<< FW_VIID_PFN_S;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..9167bcd9fffe 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
n->dev->name, ndev->name, mtu);
}
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_neigh;
+ }
+
cdev = cxgbi_device_find_by_netdev(ndev, &port);
if (!cdev) {
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
}
ndev = n->dev;
+ if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+ pr_info("%s interface not up.\n", ndev->name);
+ err = -ENETDOWN;
+ goto rel_rt;
+ }
+
if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
pr_info("multi-cast route %pI6 port %u, dev %s.\n",
daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+ struct module *owner = csk->cdev->owner;
log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
__kfree_skb(skb);
+
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
return -EIO;
}
+ if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+ cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+ /* If the completion flag is set and the data was directly
+ * placed into host memory, update task->exp_datasn to the
+ * DataSN in the completion iSCSI hdr, as the T6 adapter
+ * generates a completion only for the last PDU of a
+ * sequence.
+ */
+ itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+ struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+ u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+ skb->data)->datasn);
+ if (task && task->sc) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ tcp_task->exp_datasn = data_sn;
+ }
+ }
+
return read_pdu_skb(conn, skb, 0, 0);
}
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
csk->rcv_wup, cdev->rx_credit_thres,
csk->rcv_win);
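+ /* rx_credit_thres == 0 disables driver-side credit returns;
+ * t4_uld_add() sets it to 0 for chips newer than T5.
+ */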
+ if (!cdev->rx_credit_thres)
+ return;
+
if (csk->state != CTP_ESTABLISHED)
return;
credits = csk->copied_seq - csk->rcv_wup;
if (unlikely(!credits))
return;
- if (unlikely(cdev->rx_credit_thres == 0))
- return;
-
must_send = credits + 16384 >= csk->rcv_win;
if (must_send || credits >= cdev->rx_credit_thres)
csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index e7802738f5d2..95ba99044c3e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
SKCBF_RX_STATUS, /* received ddp status */
+ SKCBF_RX_ISCSI_COMPL, /* received iscsi completion */
SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */
SKCBF_RX_HCRC_ERR, /* header digest error */
SKCBF_RX_DCRC_ERR, /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
struct pci_dev *pdev;
struct dentry *debugfs_root;
struct iscsi_transport *itp;
+ struct module *owner;
unsigned int pfvf;
unsigned int rx_credit_thres;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 27c0dce22e72..5f75e638ec95 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -37,7 +37,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
////////////////////////////////////////////////////////////////
#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/stat.h>
#include <linux/slab.h> /* for kmalloc() */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 9ddc9200e0a4..9e4b7709043e 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
atomic_t in_flight; /* io counter */
+ bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2544a37ece0a..adb3d5871e74 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
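+ /* Serialize host resets: if one is already in progress, report
+ * success and let the running reset finish.
+ */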
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->internal_reset_inprogress == 0) {
+ fnic->internal_reset_inprogress = 1;
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "host reset in progress skipping another host reset\n");
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
}
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->internal_reset_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index de5147a8c959..6f9665d50d84 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -37,7 +37,7 @@
#define MAX_CARDS 8
/* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
module_param(dtc_3181e, int, 0);
module_param(hp_c2502, int, 0);
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
MODULE_ALIAS("g_NCR5380_mmio");
MODULE_LICENSE("GPL");
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR.
+ */
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG,
+ ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+ msleep(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int irq_mask, irq;
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ irq_mask = probe_irq_on();
+ g_NCR5380_trigger_irq(instance);
+ irq = probe_irq_off(irq_mask);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ if (irq <= 0)
+ return NO_IRQ;
+ return irq;
+}
+
/*
* Configure I/O address of 53C400A or DTC436 by writing magic numbers
* to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
outb(magic[3], 0x379);
outb(magic[4], 0x379);
- /* allowed IRQs for HP C2502 */
- if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
- irq = 0;
+ if (irq == 9)
+ irq = 2;
+
if (idx >= 0 && idx <= 7)
cfg = 0x80 | idx | (irq << 4);
outb(cfg, 0x379);
}
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
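+/*
+ * Return the first IRQ in the -1 terminated table that can be acquired
+ * (the probe request is released immediately), or -1 if none is free.
+ */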
+static int legacy_find_free_irq(int *irq_table)
+{
+ while (*irq_table != -1) {
+ if (!request_irq(*irq_table, legacy_empty_irq_handler,
+ IRQF_PROBE_SHARED, "Test IRQ",
+ (void *)irq_table)) {
+ free_irq(*irq_table, (void *) irq_table);
+ return *irq_table;
+ }
+ irq_table++;
+ }
+ return -1;
+}
+
static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
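+/* IRQ lines selectable on the HP C2502; magic_configure() routes IRQ 9 as 2 */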
+static int hp_c2502_irqs[] = {
+ 9, 5, 7, 3, 4, -1
+};
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
}
}
+ /* Check for vacant slot */
+ NCR5380_write(MODE_REG, 0);
+ if (NCR5380_read(MODE_REG) != 0) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
if (ret)
goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
NCR5380_maybe_reset_bus(instance);
- if (irq != IRQ_AUTO)
- instance->irq = irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
/* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ if (irq == 255 || irq == 0)
+ irq = NO_IRQ;
+ else if (irq == -1)
+ irq = IRQ_AUTO;
+
+ if (board == BOARD_HP_C2502) {
+ int *irq_table = hp_c2502_irqs;
+ int board_irq = -1;
+
+ switch (irq) {
+ case NO_IRQ:
+ board_irq = 0;
+ break;
+ case IRQ_AUTO:
+ board_irq = legacy_find_free_irq(irq_table);
+ break;
+ default:
+ while (*irq_table != -1)
+ if (*irq_table++ == irq)
+ board_irq = irq;
+ }
+
+ if (board_irq <= 0) {
+ board_irq = 0;
+ irq = NO_IRQ;
+ }
+
+ magic_configure(port_idx, board_irq, magic);
+ }
+
+ if (irq == IRQ_AUTO) {
+ instance->irq = g_NCR5380_probe_irq(instance);
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq detected\n");
+ } else {
+ instance->irq = irq;
+ if (instance->irq == NO_IRQ)
+ shost_printk(KERN_INFO, instance, "no irq provided\n");
+ }
if (instance->irq != NO_IRQ) {
- /* set IRQ for HP C2502 */
- if (board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
if (request_irq(instance->irq, generic_NCR5380_intr,
0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
instance->irq = NO_IRQ;
+ shost_printk(KERN_INFO, instance,
+ "irq %d denied\n", instance->irq);
+ } else {
+ shost_printk(KERN_INFO, instance,
+ "irq %d acquired\n", instance->irq);
}
}
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
- }
-
ret = scsi_add_host(instance, pdev);
if (ret)
goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
int ret = 0;
/* compatibility with old-style parameters */
- if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
irq[0] = ncr_irq;
base[0] = ncr_addr;
if (ncr_5380)
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 3ce5b65ccb00..81b22d989648 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -51,4 +51,6 @@
#define BOARD_DTC3181E 3
#define BOARD_HP_C2502 4
+#define IRQ_AUTO 254
+
#endif /* GENERIC_NCR5380_H */
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 0a767740bf02..d020a13646ae 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -130,7 +130,7 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 691a09316952..cbc0c5fe5a60 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
/* Device is not on the list, add it. */
device = kmalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ if (!device)
return;
- }
+
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
spin_lock_irqsave(&h->offline_device_lock, flags);
list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
- if (!h->cmd_sg_list) {
- dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+ if (!h->cmd_sg_list)
return -ENOMEM;
- }
+
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
- if (!h->cmd_sg_list[i]) {
- dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+ if (!h->cmd_sg_list[i])
goto clean;
- }
+
}
return 0;
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
struct bmic_sense_subsystem_info *ssi;
ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
- if (ssi == NULL) {
- dev_warn(&h->pdev->dev,
- "%s: out of memory\n", __func__);
+ if (!ssi)
return;
- }
rc = hpsa_bmic_sense_subsystem_information(h,
scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
- dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
- __FILE__, __LINE__);
h->drv_req_rescan = 1;
goto out;
}
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
*/
if (!h->lastlogicals)
- goto out;
+ return rc;
logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
- if (!logdev) {
- dev_warn(&h->pdev->dev,
- "Out of memory, can't track lun changes.\n");
- goto out;
- }
+ if (!logdev)
+ return rc;
+
if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
dev_warn(&h->pdev->dev,
"report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
return;
options = kzalloc(sizeof(*options), GFP_KERNEL);
- if (!options) {
- dev_err(&h->pdev->dev,
- "Error: failed to disable rld caching, during alloc.\n");
+ if (!options)
return;
- }
c = cmd_alloc(h);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index a83f705ed8a5..db17ad15b0c1 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -26,7 +26,7 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee6ef52..50cd01165e35 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -95,6 +95,7 @@ static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = VIOSRP_CRQ_FREE;
+ wmb();
} else {
done = 1;
}
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
dev_warn(evt_struct->hostdata->dev,
- "bad SRP RSP type %d\n", rsp->opcode);
+ "bad SRP RSP type %#02x\n", rsp->opcode);
}
if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
dev_set_drvdata(&vdev->dev, hostdata);
+ list_add_tail(&hostdata->host_list, &ibmvscsi_head);
return 0;
add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+ list_del(&hostdata->host_list);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e0f6c3aeb4ee..3a7875575616 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
+ struct list_head host_list;
atomic_t request_limit;
int client_migrated;
int reset_crq;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c9fa3565c671..8fb5c54c7dd3 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -1693,7 +1694,7 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
if (!vscsi->rsp_q_timer.started) {
if (vscsi->rsp_q_timer.timer_pops <
MAX_TIMER_POPS) {
- kt = ktime_set(0, WAIT_NANO_SECONDS);
+ kt = WAIT_NANO_SECONDS;
} else {
/*
* slide the timeslice if the maximum
@@ -3584,7 +3585,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
1, 1);
if (rc) {
pr_err("srp_transfer_data() failed: %d\n", rc);
- return -EAGAIN;
+ return -EIO;
}
/*
* We now tell TCM to add this WRITE CDB directly into the TCM storage
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 98b0ca79a5c5..65c6189885ab 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -26,6 +26,7 @@
#ifndef __H_IBMVSCSI_TGT
#define __H_IBMVSCSI_TGT
+#include <linux/interrupt.h>
#include "libsrp.h"
#define SYS_ID_NAME_LEN 64
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 45b9566b928e..b782bb60baf0 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -51,7 +51,7 @@
#define _IPS_H_
#include <linux/nmi.h>
- #include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
/*
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9d05302a3bcd..3c63c292cb92 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -34,7 +34,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 55b425c0a654..a30e725f2d5c 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -17,7 +17,7 @@
#include <linux/spinlock.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6484c382f670..d5cf15eb8c5e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -42,7 +42,7 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8de0eda8cd00..394fe1338d09 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
u8 block;
u8 tlr_snoop_check;
u8 ignore_delay_remove;
+ /* Iopriority Command Handling */
+ u8 ncq_prio_enable;
+
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
u16 smid);
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 050bd788ad02..95f0f24bac05 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
/*********** diagnostic trigger suppport *** END ****************************/
-
-
/*****************************************/
struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+/**
+ * _ctl_device_ncq_prio_enable_show - report whether NCQ prioritized I/O is enabled
+ * @dev - pointer to embedded device
+ * @attr - device attribute (unused)
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!scsih_ncq_prio_supp(sdev))
+ return -EINVAL;
+
+ sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+ return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+ _ctl_device_ncq_prio_enable_show,
+ _ctl_device_ncq_prio_enable_store);
+
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq_prio_enable,
NULL,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5c8f75247d73..b5c966e319d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _raid_device *raid_device;
+ struct request *rq = scmd->request;
+ int class;
Mpi2SCSIIORequest_t *mpi_request;
u32 mpi_control;
u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* set tags */
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+ /* NCQ Prio supported, make sure control indicated high priority */
+ if (sas_device_priv_data->ncq_prio_enable) {
+ class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (class == IOPRIO_CLASS_RT)
+ mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+ }
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+ unsigned char *buf;
+ bool ncq_prio_supp = false;
+
+ if (!scsi_device_supports_vpd(sdev))
+ return ncq_prio_supp;
+
+ buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+ if (!buf)
+ return ncq_prio_supp;
+
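+ /* ATA Information VPD page (0x89): byte 213 bit 4 corresponds to
+ * IDENTIFY word 76 bit 12, the NCQ priority support flag.
+ */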
+ if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+ ncq_prio_supp = (buf[213] >> 4) & 1;
+
+ kfree(buf);
+ return ncq_prio_supp;
+}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index a2960f5d98ec..e8196c55b633 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -52,7 +52,7 @@ static const char * osst_version = "0.99.4";
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
/* The driver prints some debugging information on the console if DEBUG
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644
index 000000000000..21331453db7b
--- /dev/null
+++ b/drivers/scsi/qedi/Kconfig
@@ -0,0 +1,10 @@
+config QEDI
+ tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+ depends on PCI && SCSI && UIO
+ depends on QED
+ select SCSI_ISCSI_ATTRS
+ select QED_LL2
+ select QED_ISCSI
+ ---help---
+ This driver supports iSCSI offload for the QLogic FastLinQ
+ 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644
index 000000000000..2b3e16b24299
--- /dev/null
+++ b/drivers/scsi/qedi/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+ qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644
index 000000000000..5ca3e8c28a3f
--- /dev/null
+++ b/drivers/scsi/qedi/qedi.h
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL 0
+#define QEDI_MODE_RECOVERY 1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE 1
+#define QEDI_MAX_ISCSI_TASK 4096
+#define QEDI_MAX_TASK_NUM 0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON 1024
+
+#define QEDI_MAX_BD_LEN 0xffff
+#define QEDI_BD_SPLIT_SZ 0x1000
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_FAST_SGE_COUNT 4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF 8
+#define MIN_NUM_CPUS_MSIX(x) min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN 60000
+#define QEDI_LOCAL_PORT_MAX 61024
+#define QEDI_LOCAL_PORT_RANGE (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID 0xffff
+#define TX_RX_RING 16
+#define RX_RING (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE 0x400
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PAGE_SIZE 4096
+#define QEDI_PATH_HANDLE 0xFE0000000UL
+
+struct qedi_uio_ctrl {
+ /* meta data */
+ u32 uio_hsi_version;
+
+ /* user writes */
+ u32 host_tx_prod;
+ u32 host_rx_cons;
+ u32 host_rx_bd_cons;
+ u32 host_tx_pkt_len;
+ u32 host_rx_cons_cnt;
+
+ /* driver writes */
+ u32 hw_tx_cons;
+ u32 hw_rx_prod;
+ u32 hw_rx_bd_prod;
+ u32 hw_rx_prod_cnt;
+
+ /* other */
+ u8 mac_addr[6];
+ u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+ u32 rx_pkt_index;
+ u32 rx_pkt_len;
+ u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD (QEDI_NUM_RX_BD - 1)
+
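+/* Advance an rx index, skipping the last descriptor slot of each page */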
+#define QEDI_NEXT_RX_IDX(x) ((((x) & (QEDI_MAX_RX_DESC_CNT)) == \
+ (QEDI_MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+ struct uio_info qedi_uinfo;
+ u32 uio_dev;
+ struct list_head list;
+
+ u32 ll2_ring_size;
+ void *ll2_ring;
+
+ u32 ll2_buf_size;
+ void *ll2_buf;
+
+ void *rx_pkt;
+ void *tx_pkt;
+
+ struct qedi_ctx *qedi;
+ struct pci_dev *pdev;
+ void *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE 2048
+#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+ u64 hw_p_cq; /* Completion queue PBL */
+ u64 hw_p_rq; /* Request queue PBL */
+ u64 hw_p_cmdq; /* Command queue PBL */
+};
+
+struct global_queue {
+ union iscsi_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_mem_size;
+ u32 cq_cons_idx; /* Completion queue consumer index */
+
+ void *cq_pbl;
+ dma_addr_t cq_pbl_dma;
+ u32 cq_pbl_size;
+
+};
+
+struct qedi_fastpath {
+ struct qed_sb_info *sb_info;
+ u16 sb_id;
+#define QEDI_NAME_SIZE 16
+ char name[QEDI_NAME_SIZE];
+ struct qedi_ctx *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+ struct list_head list;
+ struct iscsi_cqe_solicited cqe;
+ u16 que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base: queue base memory
+ * @cid_que: queue memory pointer
+ * @cid_q_prod_idx: producer index
+ * @cid_q_cons_idx: consumer index
+ * @cid_q_max_idx: max index, used to detect wrap-around condition
+ * @cid_free_cnt: queue size
+ * @conn_cid_tbl: iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+ void *cid_que_base;
+ u32 *cid_que;
+ u32 cid_q_prod_idx;
+ u32 cid_q_cons_idx;
+ u32 cid_q_max_idx;
+ u32 cid_free_cnt;
+ struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+ spinlock_t lock; /* Port id lock */
+ u16 start;
+ u16 max;
+ u16 next;
+ unsigned long *table;
+};
+
+struct qedi_itt_map {
+ __le32 itt;
+ struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE 2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ 0
+#define QEDI_IO_TRACE_RSP 1
+ u8 direction;
+ u16 task_id;
+ u32 cid;
+ u32 port_id; /* Remote port fabric ID */
+ int lun;
+ u8 op; /* SCSI CDB */
+ u8 lba[4];
+ unsigned int bufflen; /* SCSI buffer length */
+ unsigned int sg_count; /* Number of SG elements */
+ u8 fast_sgs; /* number of fast sgls */
+ u8 slow_sgs; /* number of slow sgls */
+ u8 cached_sgs; /* number of cached sgls */
+ int result; /* Result passed back to mid-layer */
+ unsigned long jiffies; /* Time stamp when I/O logged */
+ int refcount; /* Reference count for task id */
+ unsigned int blk_req_cpu; /* CPU that the task is queued on by
+ * blk layer
+ */
+ unsigned int req_cpu; /* CPU that the task is queued on */
+ unsigned int intr_cpu; /* Interrupt CPU that the task is received on */
+ unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
+ * returned to blk layer
+ */
+ bool cached_sge;
+ bool slow_sge;
+ bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM 256
+#define QEDI_BDQ_BUF_SIZE 256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+ void *buf_addr;
+ dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+ struct qedi_dbg_ctx dbg_ctx;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct qed_dev *cdev;
+ struct qed_dev_iscsi_info dev_info;
+ struct qed_int_info int_info;
+ struct qedi_glbl_q_params *p_cpuq;
+ struct global_queue **global_queues;
+ /* uio declaration */
+ struct qedi_uio_dev *udev;
+ struct list_head ll2_skb_list;
+ spinlock_t ll2_lock; /* Light L2 lock */
+ spinlock_t hba_lock; /* per port lock */
+ struct task_struct *ll2_recv_thread;
+ unsigned long flags;
+#define UIO_DEV_OPENED 1
+#define QEDI_IOTHREAD_WAKE 2
+#define QEDI_IN_RECOVERY 5
+#define QEDI_IN_OFFLINE 6
+
+ u8 mac[ETH_ALEN];
+ u32 src_ip[4];
+ u8 ip_type;
+
+ /* Physical address of above array */
+ dma_addr_t hw_p_cpuq;
+
+ struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+ void *bdq_pbl;
+ dma_addr_t bdq_pbl_dma;
+ size_t bdq_pbl_mem_size;
+ void *bdq_pbl_list;
+ dma_addr_t bdq_pbl_list_dma;
+ u8 bdq_pbl_list_num_entries;
+ void __iomem *bdq_primary_prod;
+ void __iomem *bdq_secondary_prod;
+ u16 bdq_prod_idx;
+ u16 rq_num_entries;
+
+ u32 msix_count;
+ u32 max_sqes;
+ u8 num_queues;
+ u32 max_active_conns;
+
+ struct iscsi_cid_queue cid_que;
+ struct qedi_endpoint **ep_tbl;
+ struct qedi_portid_tbl lcl_port_tbl;
+
+ /* Rx fast path intr context */
+ struct qed_sb_info *sb_array;
+ struct qedi_fastpath *fp_array;
+ struct qed_iscsi_tid tasks;
+
+#define QEDI_LINK_DOWN 0
+#define QEDI_LINK_UP 1
+ atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID 0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID (MAX_ISCSI_TASK_ENTRIES + 1)
+ unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+ struct qedi_itt_map *itt_map;
+ u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+ struct qed_pf_params pf_params;
+
+ struct workqueue_struct *tmf_thread;
+ struct workqueue_struct *offload_thread;
+
+ u16 ll2_mtu;
+
+ struct workqueue_struct *dpc_wq;
+
+ spinlock_t task_idx_lock; /* To protect gbl context */
+ s32 last_tidx_alloc;
+ s32 last_tidx_clear;
+
+ struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+ spinlock_t io_trace_lock; /* protect trace log buf */
+ u16 io_trace_idx;
+ unsigned int intr_cpu;
+ u32 cached_sgls;
+ bool use_cached_sge;
+ u32 slow_sgls;
+ bool use_slow_sge;
+ u32 fast_sgls;
+ bool use_fast_sge;
+
+ atomic_t num_offloads;
+};
+
+struct qedi_work {
+ struct list_head list;
+ struct qedi_ctx *qedi;
+ union iscsi_cqe cqe;
+ u16 que_idx;
+ bool is_solicited;
+};
+
+struct qedi_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock; /* Per cpu worker lock */
+};
+
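+/* Return the address of the firmware task context for @tid within its block */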
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+ return (info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->size);
+}
+
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#endif /* _QEDI_H_ */
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644
index 000000000000..2bdedb9c39bc
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & QEDI_LOG_WARN))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_notice("[%s]:[%s:%d]:%d: %pV",
+ dev_name(&qedi->pdev->dev), nfunc, line,
+ qedi->host_no, &vaf);
+ else
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 level, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedi_dbg_log & level))
+ return;
+
+ if (likely(qedi) && likely(qedi->pdev))
+ pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+ nfunc, line, qedi->host_no, &vaf);
+ else
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ int ret = 0;
+
+ for (; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ pr_err("Unable to create sysfs %s attr, err(%d).\n",
+ iter->name, ret);
+ }
+ return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ for (; iter->name; iter++)
+ sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644
index 000000000000..c55572badfb0
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT 0x1 /* Set default logging mask */
+#define QEDI_LOG_INFO 0x2 /* Informational logs,
+ * MAC address, WWPN, WWNN
+ */
+#define QEDI_LOG_DISC 0x4 /* Init, discovery, rport */
+#define QEDI_LOG_LL2 0x8 /* LL2, VLAN logs */
+#define QEDI_LOG_CONN 0x10 /* Connection setup, cleanup */
+#define QEDI_LOG_EVT 0x20 /* Events, link, mtu */
+#define QEDI_LOG_TIMER 0x40 /* Timer events */
+#define QEDI_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL 0x200 /* unsolicited event logs */
+#define QEDI_LOG_IO 0x400 /* scsi cmd, completion */
+#define QEDI_LOG_MQ 0x800 /* Multi Queue logs */
+#define QEDI_LOG_BSG 0x1000 /* BSG logs */
+#define QEDI_LOG_DEBUGFS 0x2000 /* debugFS logs */
+#define QEDI_LOG_LPORT 0x4000 /* lport logs */
+#define QEDI_LOG_ELS 0x8000 /* ELS logs */
+#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
+#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
+#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
+ * free
+ */
+#define QEDI_TRACK_TID 0x100000 /* Track TID state. To be
+ * enabled only at module load
+ * and not run-time.
+ */
+#define QEDI_TRACK_CMD_LIST 0x300000 /* Track active cmd list nodes,
+ * done with reference to TID,
+ * hence TRACK_TID also enabled.
+ */
+#define QEDI_LOG_NOTICE 0x40000000 /* Notice logs */
+#define QEDI_LOG_WARN 0x80000000 /* Warning logs */
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+ unsigned int host_no;
+ struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDI_ERR(pdev, fmt, ...) \
+ qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(pdev, fmt, ...) \
+ qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(pdev, fmt, ...) \
+ qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(pdev, level, fmt, ...) \
+ qedi_dbg_info(pdev, __func__, __LINE__, level, fmt, \
+ ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+ u32 info, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+ char *name;
+ struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+ char *oper_str;
+ ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+ char *name;
+ struct qedi_list_of_funcs *qedi_funcs;
+};
+
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = drv##_dbg_##ops##_cmd_read, \
+ .write = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = drv##_dbg_##ops##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644
index 000000000000..955936274241
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+ struct qedi_debugfs_ops *dops,
+ const struct file_operations *fops)
+{
+ char host_dirname[32];
+ struct dentry *file_dentry = NULL;
+
+ sprintf(host_dirname, "host%u", qedi->host_no);
+ qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+ if (!qedi->bdf_dentry)
+ return;
+
+ while (dops) {
+ if (!(dops->name))
+ break;
+
+ file_dentry = debugfs_create_file(dops->name, 0600,
+ qedi->bdf_dentry, qedi,
+ fops);
+ if (!file_dentry) {
+ QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+ "Debugfs entry %s creation failed\n",
+ dops->name);
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ return;
+ }
+ dops++;
+ fops++;
+ }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+ debugfs_remove_recursive(qedi->bdf_dentry);
+ qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+ qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+ if (!qedi_dbg_root)
+ QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+ debugfs_remove_recursive(qedi_dbg_root);
+ qedi_dbg_root = NULL;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (!do_not_recover)
+ do_not_recover = 1;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+ if (do_not_recover)
+ do_not_recover = 0;
+
+ QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+ do_not_recover);
+ return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+ { "enable", qedi_dbg_do_not_recover_enable },
+ { "disable", qedi_dbg_do_not_recover_disable },
+ { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+ { "gbl_ctx", NULL },
+ { "do_not_recover", qedi_dbg_do_not_recover_ops},
+ { "io_trace", NULL },
+ { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t cnt = 0;
+ struct qedi_dbg_ctx *qedi_dbg =
+ (struct qedi_dbg_ctx *)filp->private_data;
+ struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+ if (*ppos)
+ return 0;
+
+ while (lof) {
+ if (!(lof->oper_str))
+ break;
+
+ if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) {
+ cnt = lof->oper_func(qedi_dbg);
+ break;
+ }
+
+ lof++;
+ }
+ return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t cnt = 0;
+
+ if (*ppos)
+ return 0;
+
+ cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+ struct qedi_fastpath *fp = NULL;
+ struct qed_sb_info *sb_info = NULL;
+ struct status_block *sb = NULL;
+ struct global_queue *que = NULL;
+ int id;
+ u16 prod_idx;
+ struct qedi_ctx *qedi = s->private;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+ fp = &qedi->fp_array[id];
+ sb_info = fp->sb_info;
+ sb = sb_info->sb_virt;
+ prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+ STATUS_BLOCK_PROD_INDEX_MASK);
+ seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+ que = qedi->global_queues[fp->sb_id];
+ seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+ seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+ seq_puts(s, "=========== END ==================\n\n\n");
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+ int id, idx = 0;
+ struct qedi_ctx *qedi = s->private;
+ struct qedi_io_log *io_log;
+ unsigned long flags;
+
+ seq_puts(s, " DUMP IO LOGS:\n");
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+ idx = qedi->io_trace_idx;
+ for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+ io_log = &qedi->io_trace_buf[idx];
+ seq_printf(s, "iodir-%d:", io_log->direction);
+ seq_printf(s, "tid-0x%x:", io_log->task_id);
+ seq_printf(s, "cid-0x%x:", io_log->cid);
+ seq_printf(s, "lun-%d:", io_log->lun);
+ seq_printf(s, "op-0x%02x:", io_log->op);
+ seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+ io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+ seq_printf(s, "buflen-%d:", io_log->bufflen);
+ seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+ seq_printf(s, "res-0x%08x:", io_log->result);
+ seq_printf(s, "jif-%lu:", io_log->jiffies);
+ seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+ seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+ seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+ seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+ idx++;
+ if (idx == QEDI_IO_TRACE_SIZE)
+ idx = 0;
+ }
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+ return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+ struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+ struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+ dbg_ctx);
+
+ return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+ qedi_dbg_fileops_seq(qedi, gbl_ctx),
+ qedi_dbg_fileops(qedi, do_not_recover),
+ qedi_dbg_fileops_seq(qedi, io_trace),
+ { NULL, NULL },
+};
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644
index 000000000000..b1d3904ae8fd
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (cmd->io_tbl.sge_valid && sc) {
+ cmd->io_tbl.sge_valid = 0;
+ scsi_dma_unmap(sc);
+ }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_logout_rsp *resp_hdr;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_logout_response_hdr *cqe_logout_response;
+ struct qedi_cmd *cmd;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+ spin_lock(&session->back_lock);
+ resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_logout_response->opcode;
+ resp_hdr->flags = cqe_logout_response->flags;
+ resp_hdr->hlength = 0;
+
+ resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_text_rsp *resp_hdr_ptr;
+ struct iscsi_text_response_hdr *cqe_text_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr_ptr->opcode = cqe_text_response->opcode;
+ resp_hdr_ptr->flags = cqe_text_response->flags;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->ttt = cqe_text_response->ttt;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+ pld_len = cqe_text_response->hdr_second_dword &
+ ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_cls_session *cls_sess;
+ int rval = 0;
+
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+ iscsi_block_session(session->cls_session);
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+ if (rval) {
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ iscsi_unblock_session(session->cls_session);
+ return;
+ }
+
+ iscsi_unblock_session(session->cls_session);
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ spin_unlock(&session->back_lock);
+ kfree(resp_hdr_ptr);
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
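+/*
+ * Build the iSCSI TM response PDU from the firmware CQE. Logical unit,
+ * target warm and target cold resets are handed off to qedi_tmf_resp_work()
+ * so outstanding I/O can be cleaned up first; other TMF responses are
+ * completed inline.
+ */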
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tmf_response_hdr *cqe_tmp_response;
+ struct iscsi_tm_rsp *resp_hdr_ptr;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 *tmp;
+
+ cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+ qedi_cmd = task->dd_data;
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ if (!qedi_cmd->tmf_resp_buf) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate resp buf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+ /* Fill up the header */
+ resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+ resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+ resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_tmp_response->hdr_second_dword &
+ ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+ tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+ if (likely(qedi_cmd->io_cmd_in_list)) {
+ qedi_cmd->io_cmd_in_list = false;
+ list_del_init(&qedi_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+ goto unblock_sess;
+ }
+
+ qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+ kfree(resp_hdr_ptr);
+
+unblock_sess:
+ spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_task_context *task_ctx;
+ struct iscsi_login_rsp *resp_hdr_ptr;
+ struct iscsi_login_response_hdr *cqe_login_response;
+ struct qedi_cmd *cmd;
+ int pld_len;
+ u32 *tmp;
+
+ cmd = (struct qedi_cmd *)task->dd_data;
+
+ cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+ task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+ spin_lock(&session->back_lock);
+ resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+ resp_hdr_ptr->opcode = cqe_login_response->opcode;
+ resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+ resp_hdr_ptr->hlength = 0;
+
+ hton24(resp_hdr_ptr->dlength,
+ (cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+ tmp = (u32 *)resp_hdr_ptr->dlength;
+ resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ resp_hdr_ptr->tsih = cqe_login_response->tsih;
+ resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+ resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+ resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+ resp_hdr_ptr->status_class = cqe_login_response->status_class;
+ resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+ pld_len = cqe_login_response->hdr_second_dword &
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+ qedi_conn->gen_pdu.resp_buf,
+ (qedi_conn->gen_pdu.resp_wr_ptr -
+ qedi_conn->gen_pdu.resp_buf));
+
+ spin_unlock(&session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
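+/*
+ * Copy the payload of an unsolicited PDU out of the BDQ buffer selected by
+ * the rqe_opaque index in the CQE. Only SINGLE and FIRST unsolicited CQEs
+ * carry data that needs to be copied here.
+ */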
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ char *ptr, int len)
+{
+ u16 idx = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+ len, qedi->bdq_prod_idx,
+ (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if (idx > (QEDI_BDQ_NUM - 1)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
+ cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+ switch (cqe->unsol_cqe_type) {
+ case ISCSI_CQE_UNSOLICITED_SINGLE:
+ case ISCSI_CQE_UNSOLICITED_FIRST:
+ if (len)
+ memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+ break;
+ case ISCSI_CQE_UNSOLICITED_MIDDLE:
+ case ISCSI_CQE_UNSOLICITED_LAST:
+ break;
+ default:
+ break;
+ }
+}
+
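+/*
+ * Return consumed BDQ buffers to the firmware: re-post the buffer address
+ * in the BDQ PBL and advance the producer index on both the primary and
+ * secondary producer registers.
+ */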
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ int count)
+{
+ u16 tmp;
+ u16 idx = 0;
+ struct scsi_bd *pbl;
+
+ /* Obtain buffer address from rqe_opaque */
+ idx = cqe->rqe_opaque.lo;
+ if (idx > (QEDI_BDQ_NUM - 1)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+ idx);
+ return;
+ }
+
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+ pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+ pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, idx);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+ /* Increment producer to let f/w know we've handled the frame */
+ qedi->bdq_prod_idx += count;
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+}
+
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+ struct iscsi_cqe_unsolicited *cqe,
+ u32 pdu_len, u32 num_bdqs,
+ char *bdq_data)
+{
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "num_bdqs [%d]\n", num_bdqs);
+
+ qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+ qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
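+/*
+ * Handle a NOP-In CQE. An unsolicited CQE is a target-initiated NOP-In and
+ * is completed with RESERVED_ITT; otherwise this is the response to one of
+ * our NOP-Outs and the matching task is torn down. Returns 1 for the
+ * target-initiated case, 0 otherwise.
+ */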
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn, u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_nop_in_hdr *cqe_nop_in;
+ struct iscsi_nopin *hdr;
+ struct qedi_cmd *cmd;
+ int tgt_async_nop = 0;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+ pdu_len = cqe_nop_in->hdr_second_dword &
+ ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_nop_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+ hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ hdr->itt = RESERVED_ITT;
+ tgt_async_nop = 1;
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ goto done;
+ }
+
+ /* Response to one of our nop-outs */
+ if (task) {
+ cmd = task->dd_data;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+ lun[0] = 0xffffffff;
+ lun[1] = 0xffffffff;
+ memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ }
+
+done:
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+ return tgt_async_nop;
+}
+
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ u16 que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_async_msg_hdr *cqe_async_msg;
+ struct iscsi_async *resp_hdr;
+ u32 lun[2];
+ u32 pdu_len, num_bdqs;
+ char bdq_data[QEDI_BDQ_BUF_SIZE];
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+
+ cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+ pdu_len = cqe_async_msg->hdr_second_dword &
+ ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pdu_len, num_bdqs, bdq_data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+
+ resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = cqe_async_msg->opcode;
+ resp_hdr->flags = 0x80;
+
+ lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+ lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+ memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+ resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+ resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+ resp_hdr->async_event = cqe_async_msg->async_event;
+ resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+ resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+ resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+ resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+ pdu_len);
+
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn,
+ uint16_t que_idx)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_reject_hdr *cqe_reject;
+ struct iscsi_reject *hdr;
+ u32 pld_len, num_bdqs;
+ unsigned long flags;
+
+ spin_lock_bh(&session->back_lock);
+ cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+ pld_len = cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+ num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+ if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+ spin_lock_irqsave(&qedi->hba_lock, flags);
+ qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+ pld_len, num_bdqs, conn->data);
+ spin_unlock_irqrestore(&qedi->hba_lock, flags);
+ }
+ hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+ memset(hdr, 0, sizeof(struct iscsi_hdr));
+ hdr->opcode = cqe_reject->opcode;
+ hdr->reason = cqe_reject->hdr_reason;
+ hdr->flags = cqe_reject->hdr_flags;
+ hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+ ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+ hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+ hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+ hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, pld_len);
+ spin_unlock_bh(&session->back_lock);
+}
+
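+/*
+ * Translate a SCSI response/data-in CQE into an iscsi_scsi_rsp: copy sense
+ * data on CHECK CONDITION, fix up the residual on a firmware-reported
+ * underrun, remove the command from the active list and complete the PDU
+ * back to libiscsi.
+ */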
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct scsi_cmnd *sc_cmd;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_scsi_rsp *hdr;
+ struct iscsi_data_in_hdr *cqe_data_in;
+ int datalen = 0;
+ struct qedi_conn *qedi_conn;
+ u32 iscsi_cid;
+ bool mark_cmd_node_deleted = false;
+ u8 cqe_err_bits = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ spin_lock_bh(&session->back_lock);
+ /* get the scsi command */
+ sc_cmd = cmd->scsi_cmd;
+
+ if (!sc_cmd) {
+ QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+ goto error;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "SCp.ptr is NULL, returned in another context.\n");
+ goto error;
+ }
+
+ if (!sc_cmd->request) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "sc_cmd->request is NULL, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->special) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->special is NULL so request not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ if (!sc_cmd->request->q) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+ sc_cmd);
+ goto error;
+ }
+
+ qedi_iscsi_unmap_sg_list(cmd);
+
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
+ hdr->opcode = cqe_data_in->opcode;
+ hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+ hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+ hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+ hdr->response = cqe_data_in->reserved1;
+ hdr->cmd_status = cqe_data_in->status_rsvd;
+ hdr->flags = cqe_data_in->flags;
+ hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+ if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+ datalen = cqe_data_in->reserved2 &
+ ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+ memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+ }
+
+ /* If f/w reports data underrun err then set residual to IO transfer
+ * length, set Underrun flag and clear Overrun flag explicitly
+ */
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+ hdr->itt, cqe_data_in->flags, cmd->task_id,
+ qedi_conn->iscsi_conn_id, hdr->residual_count,
+ scsi_bufflen(sc_cmd));
+ hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+ }
+
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ mark_cmd_node_deleted = true;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+ cmd->state = RESPONSE_RECEIVED;
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+ qedi_clear_task_idx(qedi, cmd->task_id);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+ conn->data, datalen);
+error:
+ spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *conn, uint16_t que_idx)
+{
+ struct iscsi_conn *iscsi_conn;
+ u32 hdr_opcode;
+
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ iscsi_conn = conn->cls_conn->dd_data;
+
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_SCSI_RESPONSE:
+ case ISCSI_OPCODE_DATA_IN:
+ qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+ break;
+ case ISCSI_OPCODE_LOGIN_RESPONSE:
+ qedi_process_login_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TMF_RESPONSE:
+ qedi_process_tmf_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_TEXT_RESPONSE:
+ qedi_process_text_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_LOGOUT_RESPONSE:
+ qedi_process_logout_resp(qedi, cqe, task, conn);
+ break;
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+ }
+}
+
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+ "itid=0x%x, cmd task id=0x%x\n",
+ cqe->itid, cmd->task_id);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+
+ spin_lock_bh(&session->back_lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->back_lock);
+}
+
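+/*
+ * Handle a task-cleanup CQE. The completed tid is matched either against a
+ * pending ABORT TASK entry on tmf_work_list or against a connection-wide
+ * cleanup request, and the waiter on qedi_conn->wait_queue is woken up.
+ */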
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ struct iscsi_cqe_solicited *cqe,
+ struct iscsi_task *task,
+ struct iscsi_conn *conn)
+{
+ struct qedi_work_map *work, *work_tmp;
+ u32 proto_itt = cqe->itid;
+ u32 ptmp_itt = 0;
+ itt_t protoitt = 0;
+ int found = 0;
+ struct qedi_cmd *qedi_cmd = NULL;
+ u32 rtid = 0;
+ u32 iscsi_cid;
+ struct qedi_conn *qedi_conn;
+ struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct iscsi_task *mtask;
+ struct iscsi_tm *tmf_hdr = NULL;
+
+ iscsi_cid = cqe->conn_id;
+ qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+ /* Based on this itt get the corresponding qedi_cmd */
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+ list) {
+ if (work->rtid == proto_itt) {
+ /* We found the command */
+ qedi_cmd = work->qedi_cmd;
+ if (!qedi_cmd->list_tmf_work) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+ proto_itt, qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+ found = 1;
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ rtid = work->rtid;
+
+ list_del_init(&work->list);
+ kfree(work);
+ qedi_cmd->list_tmf_work = NULL;
+ }
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ if (found) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ spin_lock_bh(&conn->session->back_lock);
+
+ protoitt = build_itt(get_itt(tmf_hdr->rtt),
+ conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+
+ spin_unlock_bh(&conn->session->back_lock);
+
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt),
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ dbg_cmd = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt),
+ dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+ qedi_cmd->state = CLEANUP_RECV;
+
+ qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ }
+ } else if (qedi_conn->cmd_cleanup_req > 0) {
+ spin_lock_bh(&conn->session->back_lock);
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ spin_unlock_bh(&conn->session->back_lock);
+ if (!task) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "task is null, itid=0x%x, cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ return;
+ }
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
+ cmd_new = task->dd_data;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cqe->itid, qedi_conn->iscsi_conn_id);
+ qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+ } else {
+ qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+ protoitt = build_itt(ptmp_itt, conn->session->age);
+ task = iscsi_itt_to_task(conn, protoitt);
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ WARN_ON(1);
+ }
+}
+
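+/*
+ * Fast-path CQE dispatcher: validate the CQE and its connection, fail the
+ * connection on a data-digest error, and route solicited, unsolicited,
+ * dummy and task-cleanup completions to their handlers.
+ */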
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+ struct qedi_ctx *qedi = work->qedi;
+ union iscsi_cqe *cqe = &work->cqe;
+ struct iscsi_task *task = NULL;
+ struct iscsi_nopout *nopout_hdr;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 comp_type;
+ u32 iscsi_cid;
+ u32 hdr_opcode;
+ u16 que_idx = work->que_idx;
+ u8 cqe_err_bits = 0;
+
+ comp_type = cqe->cqe_common.cqe_type;
+ hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+ cqe_err_bits =
+ cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+ cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+ if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+ return;
+ }
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return;
+ }
+
+ conn = q_conn->cls_conn->dd_data;
+
+ if (unlikely(cqe_err_bits &&
+ GET_FIELD(cqe_err_bits,
+ CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return;
+ }
+
+ switch (comp_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+ task = qedi_cmd->task;
+ if (!task) {
+ QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+ return;
+ }
+
+ /* Process NOPIN local completion */
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ if ((nopout_hdr->itt == RESERVED_ITT) &&
+ (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+ qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+ task, q_conn);
+ } else {
+ cqe->cqe_solicited.itid =
+ qedi_get_itt(cqe->cqe_solicited);
+ /* Process other solicited responses */
+ qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+ }
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ switch (hdr_opcode) {
+ case ISCSI_OPCODE_NOP_IN:
+ qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_ASYNC_MSG:
+ qedi_process_async_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ case ISCSI_OPCODE_REJECT:
+ qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+ que_idx);
+ break;
+ }
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_DUMMY:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
+ goto exit_fp_process;
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+ conn);
+ goto exit_fp_process;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
+ break;
+ }
+
+exit_fp_process:
+ return;
+}
+
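+/*
+ * Build an SQ WQE for the task and advance the SQ producer indices. The
+ * caller rings the doorbell afterwards via qedi_ring_doorbell().
+ */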
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+ u16 tid, uint16_t ptu_invalidate, int is_cleanup)
+{
+ struct iscsi_wqe *wqe;
+ struct iscsi_wqe_field *cont_field;
+ struct qedi_endpoint *ep;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_login_req *login_hdr;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+ ep = qedi_conn->ep;
+ wqe = &ep->sq[ep->sq_prod_idx];
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ if (is_cleanup) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ wqe->task_id = tid;
+ return;
+ }
+
+ if (ptu_invalidate) {
+ SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+ ISCSI_WQE_SET_PTU_INVALIDATE);
+ }
+
+ cont_field = &wqe->cont_prevtid_union.cont_field;
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ case ISCSI_OP_TEXT:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ 1);
+ cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+ break;
+ case ISCSI_OP_LOGOUT:
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_SCSI_TMFUNC:
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ break;
+ default:
+ if (!sc)
+ break;
+
+ SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+ cont_field->contlen_cdbsize_field =
+ (sc->sc_data_direction == DMA_TO_DEVICE) ?
+ scsi_bufflen(sc) : 0;
+ if (cmd->use_slowpath)
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+ else
+ SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+ (sc->sc_data_direction ==
+ DMA_TO_DEVICE) ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ break;
+ }
+
+ wqe->task_id = tid;
+ /* Make sure SQ data is coherent */
+ wmb();
+}
+
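+/*
+ * Notify the firmware of new SQ entries by writing the current firmware SQ
+ * producer index to the connection doorbell.
+ */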
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_db_data dbell = { 0 };
+
+ dbell.agg_flags = 0;
+
+ dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+ dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+ dbell.params |=
+ DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+ dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+ writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+ /* Make sure the fw write index is coherent. Include both memory barriers
+ * as a failsafe: on some architectures they map to the same instruction,
+ * but on others they are two distinct operations.
+ */
+ wmb();
+ mmiowb();
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+ "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+ qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+ qedi_conn->iscsi_conn_id);
+}
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_login_req *login_hdr;
+ struct iscsi_login_req_hdr *fw_login_req = NULL;
+ struct iscsi_cached_sge_ctx *cached_sge = NULL;
+ struct iscsi_sge *single_sge = NULL;
+ struct iscsi_sge *req_sge = NULL;
+ struct iscsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+ fw_login_req->opcode = login_hdr->opcode;
+ fw_login_req->version_min = login_hdr->min_version;
+ fw_login_req->version_max = login_hdr->max_version;
+ fw_login_req->flags_attr = login_hdr->flags;
+ fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+ fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+ fw_login_req->tsih = login_hdr->tsih;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_login_req->cid = qedi_conn->iscsi_conn_id;
+ fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ fw_login_req->exp_stat_sn = 0;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(login_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(login_hdr->dlength);
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+ struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout *logout_hdr = NULL;
+ struct qedi_cmd *qedi_cmd = NULL;
+ s16 tid = 0;
+ s16 ptu_invalidate = 0;
+
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ logout_hdr = (struct iscsi_logout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+ fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
+ fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
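+/*
+ * Issue a firmware cleanup for every outstanding command on the connection
+ * (restricted to the affected LUN for a LUN reset) and wait for the
+ * completions. On timeout, mark the devices missing, drain the device and
+ * wait once more before re-enabling I/O.
+ */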
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery)
+{
+ int rval;
+ struct iscsi_task *ctask;
+ struct qedi_cmd *cmd, *cmd_tmp;
+ struct iscsi_tm *tmf_hdr;
+ unsigned int lun = 0;
+ bool lun_reset = false;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+
+ /* task is NULL when called from recovery; valid when called from a TMF response */
+ if (task) {
+ tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+ lun_reset = true;
+ lun = scsilun_to_int(&tmf_hdr->lun);
+ }
+ }
+
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+ qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+ in_recovery, lun_reset);
+
+ if (lun_reset)
+ spin_lock_bh(&session->back_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ ctask = cmd->task;
+ if (ctask == task)
+ continue;
+
+ if (lun_reset) {
+ if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(ctask->itt),
+ cmd->scsi_cmd, cmd->scsi_cmd->device,
+ ctask->state, cmd->state,
+ qedi_conn->iscsi_conn_id);
+ if (cmd->scsi_cmd->device->lun != lun)
+ continue;
+ }
+ }
+ qedi_conn->cmd_cleanup_req++;
+ qedi_iscsi_cleanup_task(ctask, true);
+
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+ &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+ }
+
+ spin_unlock(&qedi_conn->list_lock);
+
+ if (lun_reset)
+ spin_unlock_bh(&session->back_lock);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "cmd_cleanup_req=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->iscsi_conn_id);
+
+ rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl) ||
+ qedi_conn->ep),
+ 5 * HZ);
+ if (rval) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ return 0;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+ qedi_conn->cmd_cleanup_req,
+ qedi_conn->cmd_cleanup_cmpl,
+ qedi_conn->iscsi_conn_id);
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_missing);
+ qedi_ops->common->drain(qedi->cdev);
+
+ /* Enable IOs for all other sessions except the current one. */
+ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ (qedi_conn->cmd_cleanup_req ==
+ qedi_conn->cmd_cleanup_cmpl),
+ 5 * HZ)) {
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+ return -1;
+ }
+
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_mark_device_available);
+
+ return 0;
+}
+
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot proceed, ep already disconnected, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return;
+ }
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+ qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+ qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+ rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fatal error, need hard reset, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ WARN_ON(1);
+ }
+}
+
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ struct qedi_cmd *qedi_cmd,
+ struct qedi_work_map *list_work)
+{
+ struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+ int wait;
+
+ wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+ ((qedi_cmd->state ==
+ CLEANUP_RECV) ||
+ ((qedi_cmd->type == TYPEIO) &&
+ (cmd->state ==
+ RESPONSE_RECEIVED))),
+ 5 * HZ);
+ if (!wait) {
+ qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
+ return -1;
+ }
+ return 0;
+}
+
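+/*
+ * Worker for an ABORT TASK TMF: look up the referenced task, track it on
+ * tmf_work_list, issue a firmware cleanup for it and, once the cleanup
+ * completes, send the TMF request itself to the firmware.
+ */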
+static void qedi_tmf_work(struct work_struct *work)
+{
+ struct qedi_cmd *qedi_cmd =
+ container_of(work, struct qedi_cmd, tmf_work);
+ struct qedi_conn *qedi_conn = qedi_cmd->conn;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_cls_session *cls_sess;
+ struct qedi_work_map *list_work = NULL;
+ struct iscsi_task *mtask;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ s16 rval = 0;
+ s16 tid = 0;
+
+ mtask = qedi_cmd->task;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+ set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+ goto abort_ret;
+ }
+
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+ qedi_conn->iscsi_conn_id);
+
+ if (do_not_recover) {
+ QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+ do_not_recover);
+ goto abort_ret;
+ }
+
+ list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ if (!list_work) {
+ QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+ goto abort_ret;
+ }
+
+ qedi_cmd->type = TYPEIO;
+ list_work->qedi_cmd = qedi_cmd;
+ list_work->rtid = cmd->task_id;
+ list_work->state = QEDI_WORK_SCHEDULED;
+ qedi_cmd->list_tmf_work = list_work;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+ list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+ tmf_hdr->flags);
+
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ qedi_iscsi_cleanup_task(ctask, false);
+
+ rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+ list_work);
+ if (rval == -1) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "FW cleanup got escalated, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ goto ldel_exit;
+ }
+
+ qedi_cmd->task_id = tid;
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ return;
+
+ldel_exit:
+ spin_lock_bh(&qedi_conn->tmf_work_lock);
+ if (qedi_cmd->list_tmf_work) {
+ list_del_init(&list_work->list);
+ qedi_cmd->list_tmf_work = NULL;
+ kfree(list_work);
+ }
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ spin_unlock(&qedi_conn->list_lock);
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_tmf_request_hdr *fw_tmf_request;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct iscsi_task *ctask;
+ struct iscsi_tm *tmf_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 tid = 0, ptu_invalidate = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+ tid = qedi_cmd->task_id;
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+ fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+ fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+ if (!ctask || !ctask->sc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get reference task\n");
+ return 0;
+ }
+ cmd = (struct qedi_cmd *)ctask->dd_data;
+ fw_tmf_request->rtt =
+ qedi_set_itt(cmd->task_id,
+ get_itt(tmf_hdr->rtt));
+ } else {
+ fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ fw_tmf_request->opcode = tmf_hdr->opcode;
+ fw_tmf_request->function = tmf_hdr->flags;
+ fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+ tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_tm *tmf_hdr;
+ struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ s16 tid = 0;
+
+ tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd->task = mtask;
+
+ /* If abort task then schedule the work and return */
+ if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_ABORT_TASK) {
+ qedi_cmd->state = CLEANUP_WAIT;
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+ } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+ ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+ qedi_cmd->task_id = tid;
+
+ qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_text_request_hdr *fw_text_request;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_text *text_hdr;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ text_hdr = (struct iscsi_text *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_text_request =
+ &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+ fw_text_request->opcode = text_hdr->opcode;
+ fw_text_request->flags_attr = text_hdr->flags;
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_text_request->ttt = text_hdr->ttt;
+ fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ fw_task_ctx->mstorm_st_context.task_type = 0x2;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ fw_task_ctx->mstorm_st_context.sgl_size = 1;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+ ntoh24(text_hdr->dlength);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(text_hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+ fw_task_ctx->ustorm_st_context.task_type = 0x2;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
+
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol)
+{
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_nop_out_hdr *fw_nop_out;
+ struct qedi_cmd *qedi_cmd;
+ /* For 6.5 hdr iscsi_hdr */
+ struct iscsi_nopout *nopout_hdr;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_sge *single_sge;
+ struct iscsi_sge *req_sge;
+ struct iscsi_sge *resp_sge;
+ u32 lun[2];
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+
+ req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1) {
+ QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ return -ENOMEM;
+ }
+
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ qedi_cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
+ memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+ fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
+ if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+ fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+ fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ } else {
+ fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+ fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+ qedi_cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+ }
+
+ fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+ fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+ cached_sge =
+ &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_len = req_sge->sge_len;
+ cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ cached_sge->sge.sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+ single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+ single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+ single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+ single_sge->sge_len = resp_sge->sge_len;
+ fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
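+ /*
+ * Each task id carries a reuse counter, mirrored into the ystorm and
+ * mstorm contexts; once it wraps at QEDI_MAX_TASK_NUM, ptu_invalidate
+ * is set for this SQ entry and the counter starts over.
+ */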
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+ fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+ fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ return 0;
+}
+
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+
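+ /*
+ * Carve one DMA-mapped segment into multiple SGEs: the first fragment
+ * runs up to the next QEDI_PAGE_SIZE boundary, the remainder is split
+ * into chunks of at most QEDI_BD_SPLIT_SZ. Returns the number of SGEs
+ * written starting at bd_index.
+ */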
+ while (sg_len) {
+ if (addr % QEDI_PAGE_SIZE)
+ frag_size =
+ (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+ else
+ frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+ (sg_len % QEDI_BD_SPLIT_SZ);
+
+ if (frag_size == 0)
+ frag_size = QEDI_BD_SPLIT_SZ;
+
+ bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+ bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+ QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+ "split sge %d: addr=%llx, len=%x",
+ (bd_index + sg_frags), addr, frag_size);
+
+ addr += (u64)frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int bd_count = 0;
+ int sg_count;
+ int sg_len;
+ int sg_frags;
+ u64 addr, end_addr;
+ int i;
+
+ WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+ sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+
+ /*
+ * Send a single SGE as a cached SGL when its length does not
+ * exceed MAX_SGLEN_FOR_CACHESGL (64K).
+ */
+ sg = scsi_sglist(sc);
+ if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+
+ bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_count].sge_addr.hi = (addr >> 32);
+ bd[bd_count].sge_len = (u16)sg_len;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "single-cashed-sgl: bd_count:%d addr=%llx, len=%x",
+ sg_count, addr, sg_len);
+
+ return ++bd_count;
+ }
+
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ end_addr = (addr + sg_len);
+
+ /*
+ * first sg elem in the 'list',
+ * check if end addr is page-aligned.
+ */
+ if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * last sg elem in the 'list',
+ * check if start addr is page-aligned.
+ */
+ else if ((i == (sg_count - 1)) &&
+ (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+ cmd->use_slowpath = true;
+
+ /*
+ * middle sg elements in list,
+ * check if start and end addr is page-aligned
+ */
+ else if ((i != 0) && (i != (sg_count - 1)) &&
+ ((addr % QEDI_PAGE_SIZE) ||
+ (end_addr % QEDI_PAGE_SIZE)))
+ cmd->use_slowpath = true;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+ i, sg_len);
+
+ if (sg_len > QEDI_BD_SPLIT_SZ) {
+ sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+ bd[bd_count].sge_addr.hi = addr >> 32;
+ bd[bd_count].sge_len = sg_len;
+ }
+ byte_count += sg_len;
+ bd_count += sg_frags;
+ }
+
+ if (byte_count != scsi_bufflen(sc))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+ scsi_bufflen(sc));
+ else
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+ byte_count);
+
+ WARN_ON(byte_count != scsi_bufflen(sc));
+
+ return bd_count;
+}
+
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+ int bd_count;
+ struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+ if (bd_count == 0)
+ return;
+ } else {
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+ bd[0].sge_addr.lo = 0;
+ bd[0].sge_addr.hi = 0;
+ bd[0].sge_len = 0;
+ bd_count = 0;
+ }
+ cmd->io_tbl.sge_valid = bd_count;
+}
+
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+ u32 dword;
+ int lpcnt;
+ u8 *srcp;
+
+ lpcnt = sc->cmd_len / sizeof(dword);
+ srcp = (u8 *)sc->cmnd;
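+ /*
+ * Copy the SCSI CDB into the firmware command header as big-endian
+ * 32-bit words; a trailing partial word picks up only the first two
+ * leftover bytes.
+ */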
+ while (lpcnt--) {
+ memcpy(&dword, (const void *)srcp, 4);
+ *dstp = cpu_to_be32(dword);
+ srcp += 4;
+ dstp++;
+ }
+ if (sc->cmd_len & 0x3) {
+ dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+ *dstp = cpu_to_be32(dword);
+ }
+}
+
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction)
+{
+ struct qedi_io_log *io_log;
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct scsi_cmnd *sc_cmd = task->sc;
+ unsigned long flags;
+ u8 op;
+
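+ /*
+ * Record one request/response entry in the circular I/O trace buffer
+ * (QEDI_IO_TRACE_SIZE entries), protected by io_trace_lock.
+ */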
+ spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+ io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+ io_log->direction = direction;
+ io_log->task_id = tid;
+ io_log->cid = qedi_conn->iscsi_conn_id;
+ io_log->lun = sc_cmd->device->lun;
+ io_log->op = sc_cmd->cmnd[0];
+ op = sc_cmd->cmnd[0];
+ io_log->lba[0] = sc_cmd->cmnd[2];
+ io_log->lba[1] = sc_cmd->cmnd[3];
+ io_log->lba[2] = sc_cmd->cmnd[4];
+ io_log->lba[3] = sc_cmd->cmnd[5];
+ io_log->bufflen = scsi_bufflen(sc_cmd);
+ io_log->sg_count = scsi_sg_count(sc_cmd);
+ io_log->fast_sgs = qedi->fast_sgls;
+ io_log->cached_sgs = qedi->cached_sgls;
+ io_log->slow_sgs = qedi->slow_sgls;
+ io_log->cached_sge = qedi->use_cached_sge;
+ io_log->slow_sge = qedi->use_slow_sge;
+ io_log->fast_sge = qedi->use_fast_sge;
+ io_log->result = sc_cmd->result;
+ io_log->jiffies = jiffies;
+ io_log->blk_req_cpu = smp_processor_id();
+
+ if (direction == QEDI_IO_TRACE_REQ) {
+ /* For requests we only care about the submission CPU */
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = 0;
+ io_log->blk_rsp_cpu = 0;
+ } else if (direction == QEDI_IO_TRACE_RSP) {
+ io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+ io_log->intr_cpu = qedi->intr_cpu;
+ io_log->blk_rsp_cpu = smp_processor_id();
+ }
+
+ qedi->io_trace_idx++;
+ if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+ qedi->io_trace_idx = 0;
+
+ qedi->use_cached_sge = false;
+ qedi->use_slow_sge = false;
+ qedi->use_fast_sge = false;
+
+ spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_task_context *fw_task_ctx;
+ struct iscsi_cached_sge_ctx *cached_sge;
+ struct iscsi_phys_sgl_ctx *phys_sgl;
+ struct iscsi_virt_sgl_ctx *virt_sgl;
+ struct ystorm_iscsi_task_st_ctx *yst_cxt;
+ struct mstorm_iscsi_task_st_ctx *mst_cxt;
+ struct iscsi_sgl *sgl_struct;
+ struct iscsi_sge *single_sge;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+ struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ enum iscsi_task_type task_type;
+ struct iscsi_cmd_hdr *fw_cmd;
+ u32 lun[2];
+ u32 exp_data;
+ u16 cq_idx = smp_processor_id() % qedi->num_queues;
+ s16 ptu_invalidate = 0;
+ s16 tid = 0;
+ u8 num_fast_sgs;
+
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+
+ qedi_iscsi_map_sg_list(cmd);
+
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+ fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ cmd->task_id = tid;
+
+ /* Ystorm context */
+ fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
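+ /*
+ * For writes, seed exp_data_acked with the data the target receives
+ * without further R2Ts: immediate data capped by first_burst when
+ * initial R2T is enabled, otherwise the unsolicited first burst,
+ * both bounded by the buffer length.
+ */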
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ if (conn->session->initial_r2t_en) {
+ exp_data = min((conn->session->imm_data_en *
+ conn->max_xmit_dlength),
+ conn->session->first_burst);
+ exp_data = min(exp_data, scsi_bufflen(sc));
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(exp_data);
+ } else {
+ fw_task_ctx->ustorm_ag_context.exp_data_acked =
+ min(conn->session->first_burst, scsi_bufflen(sc));
+ }
+
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ if (scsi_bufflen(sc))
+ SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
+
+ fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+ fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+ qedi_update_itt_map(qedi, tid, task->itt, cmd);
+ fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+ fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+ fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ fw_cmd->opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+ /* Mstorm context */
+ fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+ fw_task_ctx->mstorm_st_context.sense_db.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+ fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+ if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+ ptu_invalidate = 1;
+ qedi->tid_reuse_count[tid] = 0;
+ }
+ fw_task_ctx->ystorm_st_context.state.reuse_count =
+ qedi->tid_reuse_count[tid];
+ fw_task_ctx->mstorm_st_context.reuse_count =
+ qedi->tid_reuse_count[tid]++;
+
+ /* Ustorm context */
+ fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+ fw_task_ctx->ustorm_st_context.exp_data_sn =
+ be32_to_cpu(hdr->exp_statsn);
+ fw_task_ctx->ustorm_st_context.task_type = task_type;
+ fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+ fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+ SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+ num_fast_sgs = (cmd->io_tbl.sge_valid ?
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid) : 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+ fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+ fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+ cmd->io_tbl.sge_valid);
+
+ yst_cxt = &fw_task_ctx->ystorm_st_context;
+ mst_cxt = &fw_task_ctx->mstorm_st_context;
+ /* Tx path */
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ /* not considering superIO or FastIO */
+ if (cmd->io_tbl.sge_valid == 1) {
+ cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+ cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+ cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+ cached_sge->sge.sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+ phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ phys_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES,
+ min((u16)QEDI_FAST_SGE_COUNT,
+ (u16)cmd->io_tbl.sge_valid));
+ virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+ virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+ virt_sgl->sgl_base.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ virt_sgl->sgl_initial_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ } else {
+ /* Rx path */
+ if (cmd->io_tbl.sge_valid == 1) {
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SINGLE_SGE, 1);
+ single_sge = &mst_cxt->sgl_union.single_sge;
+ single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+ single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+ single_sge->sge_len = bd[0].sge_len;
+ qedi->cached_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 1);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->slow_sgls++;
+ } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+ sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+ sgl_struct->sgl_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ sgl_struct->sgl_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ sgl_struct->byte_offset =
+ (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+ SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+ ISCSI_MFLAGS_SLOW_IO, 0);
+ SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+ ISCSI_REG1_NUM_FAST_SGES, 0);
+ sgl_struct->updated_sge_size = 0;
+ sgl_struct->updated_sge_offset = 0;
+ qedi->fast_sgls++;
+ }
+ fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+ fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+ }
+
+ if (cmd->io_tbl.sge_valid == 1)
+ /* Single-SGL */
+ qedi->use_cached_sge = true;
+ else {
+ if (cmd->use_slowpath)
+ qedi->use_slow_sge = true;
+ else
+ qedi->use_fast_sge = true;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+ "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+ "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+ (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+ /* Add command in active command list */
+ spin_lock(&qedi_conn->list_lock);
+ list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+ cmd->io_cmd_in_list = true;
+ qedi_conn->active_cmd_count++;
+ spin_unlock(&qedi_conn->list_lock);
+
+ qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ qedi_ring_doorbell(qedi_conn);
+ if (qedi_io_tracing)
+ qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+ return 0;
+}
+
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ s16 ptu_invalidate = 0;
+
+ QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
+ cmd->task_id, get_itt(task->itt), task->state,
+ cmd->state, qedi_conn->iscsi_conn_id);
+
+ qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+ qedi_ring_doorbell(qedi_conn);
+
+ return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644
index 000000000000..8e488de88ece
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+ struct iscsi_task *task,
+ char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+ bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+ u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn,
+ struct iscsi_task *task);
+
+#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644
index 000000000000..8ca44c78f093
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+ __le16 conn_id;
+ u8 invalid_command;
+ u8 cmd_hdr_type;
+ __le32 reserved1[2];
+ __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+ ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+ ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+ ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+ MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644
index 000000000000..d6a205433b66
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+ struct qedi_conn *qedi_conn;
+ int i;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi_conn = qedi_get_conn_from_id(qedi, i);
+ if (!qedi_conn)
+ continue;
+
+ qedi_start_conn_recovery(qedi, qedi_conn);
+ }
+
+ return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *shost = cmd->device->host;
+ struct qedi_ctx *qedi;
+
+ qedi = iscsi_host_priv(shost);
+
+ return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+ .module = THIS_MODULE,
+ .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+ .proc_name = QEDI_MODULE_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .eh_host_reset_handler = qedi_eh_host_reset,
+ .target_alloc = iscsi_target_alloc,
+ .change_queue_depth = scsi_change_queue_depth,
+ .can_queue = QEDI_MAX_ISCSI_TASK,
+ .this_id = -1,
+ .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+ .max_sectors = 0xffff,
+ .cmd_per_lun = 128,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ if (qedi_conn->gen_pdu.resp_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.resp_bd_tbl,
+ qedi_conn->gen_pdu.resp_bd_dma);
+ qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_bd_tbl) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.resp_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+ }
+
+ if (qedi_conn->gen_pdu.req_buf) {
+ dma_free_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+ }
+}
+
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ qedi_conn->gen_pdu.req_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.req_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_buf)
+ goto login_req_buf_failure;
+
+ qedi_conn->gen_pdu.req_buf_size = 0;
+ qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+ qedi_conn->gen_pdu.resp_buf =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ &qedi_conn->gen_pdu.resp_dma_addr,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_buf)
+ goto login_resp_buf_failure;
+
+ qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+ qedi_conn->gen_pdu.req_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.req_bd_tbl)
+ goto login_req_bd_tbl_failure;
+
+ qedi_conn->gen_pdu.resp_bd_tbl =
+ dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ &qedi_conn->gen_pdu.resp_bd_dma,
+ GFP_KERNEL);
+ if (!qedi_conn->gen_pdu.resp_bd_tbl)
+ goto login_resp_bd_tbl_failure;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+ "Allocation successful, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+ return 0;
+
+login_resp_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+ qedi_conn->gen_pdu.req_bd_tbl,
+ qedi_conn->gen_pdu.req_bd_dma);
+ qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.resp_buf,
+ qedi_conn->gen_pdu.resp_dma_addr);
+ qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+ dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+ qedi_conn->gen_pdu.req_buf,
+ qedi_conn->gen_pdu.req_dma_addr);
+ qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+ iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+ "login resource alloc failed!!\n");
+ return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ if (cmd->io_tbl.sge_tbl)
+ dma_free_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(struct iscsi_sge),
+ cmd->io_tbl.sge_tbl,
+ cmd->io_tbl.sge_tbl_dma);
+
+ if (cmd->sense_buffer)
+ dma_free_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer,
+ cmd->sense_buffer_dma);
+ }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+ struct qedi_cmd *cmd)
+{
+ struct qedi_io_bdt *io = &cmd->io_tbl;
+ struct iscsi_sge *sge;
+
+ io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD *
+ sizeof(*sge),
+ &io->sge_tbl_dma, GFP_KERNEL);
+ if (!io->sge_tbl) {
+ iscsi_session_printk(KERN_ERR, session,
+ "Could not allocate BD table.\n");
+ return -ENOMEM;
+ }
+
+ io->sge_valid = 0;
+ return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+ struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct qedi_cmd *cmd = task->dd_data;
+
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ if (qedi_alloc_sget(qedi, session, cmd))
+ goto free_sgets;
+
+ cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!cmd->sense_buffer)
+ goto free_sgets;
+ }
+
+ return 0;
+
+free_sgets:
+ qedi_destroy_cmd_pool(qedi, session);
+ return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+ u16 qdepth, uint32_t initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+
+ if (!ep)
+ return NULL;
+
+ qedi_ep = ep->dd_data;
+ shost = qedi_ep->qedi->shost;
+ qedi = iscsi_host_priv(shost);
+
+ if (cmds_max > qedi->max_sqes)
+ cmds_max = qedi->max_sqes;
+ else if (cmds_max < QEDI_SQ_WQES_MIN)
+ cmds_max = QEDI_SQ_WQES_MIN;
+
+ cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+ cmds_max, 0, sizeof(struct qedi_cmd),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup session for ep=%p\n", qedi_ep);
+ return NULL;
+ }
+
+ if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+ goto session_teardown;
+ }
+
+ return cls_session;
+
+session_teardown:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+ qedi_destroy_cmd_pool(qedi, session);
+ iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_conn *qedi_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+ cid);
+ if (!cls_conn) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+ cid, cls_session);
+ return NULL;
+ }
+
+ conn = cls_conn->dd_data;
+ qedi_conn = conn->dd_data;
+ qedi_conn->cls_conn = cls_conn;
+ qedi_conn->qedi = qedi;
+ qedi_conn->ep = NULL;
+ qedi_conn->active_cmd_count = 0;
+ INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+ spin_lock_init(&qedi_conn->list_lock);
+
+ if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
+ cid, cls_session);
+ goto free_conn;
+ }
+
+ return cls_conn;
+
+free_conn:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+ iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+ iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+ if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "conn bind - entry #%d not free\n",
+ iscsi_cid);
+ return -EBUSY;
+ }
+
+ qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+ return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+ if (!qedi->cid_que.conn_cid_tbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+ return NULL;
+
+ } else if (iscsi_cid >= qedi->max_active_conns) {
+ QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+ return NULL;
+ }
+ return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct qedi_ctx *qedi = iscsi_host_priv(shost);
+ struct qedi_endpoint *qedi_ep;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ qedi_ep = ep->dd_data;
+ if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+ return -EINVAL;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ qedi_ep->conn = qedi_conn;
+ qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+ qedi_conn->fw_cid = qedi_ep->fw_cid;
+ qedi_conn->cmd_cleanup_req = 0;
+ qedi_conn->cmd_cleanup_cmpl = 0;
+
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+ return -EINVAL;
+
+ spin_lock_init(&qedi_conn->tmf_work_lock);
+ INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+ init_waitqueue_head(&qedi_conn->wait_queue);
+ return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct qed_iscsi_params_update *conn_info;
+ struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_endpoint *qedi_ep;
+ int rval;
+
+ qedi_ep = qedi_conn->ep;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ conn_info->update_flag = 0;
+
+ if (conn->hdrdgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+ if (conn->datadgst_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+ if (conn->session->initial_r2t_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+ true);
+ if (conn->session->imm_data_en)
+ SET_FIELD(conn_info->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+ true);
+
+ conn_info->max_seq_size = conn->session->max_burst;
+ conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+ conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+ conn_info->first_seq_length = conn->session->first_burst;
+ conn_info->exp_stat_sn = conn->exp_statsn;
+
+ rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+ conn_info);
+ if (rval) {
+ rval = -ENXIO;
+ QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+ goto update_conn_err;
+ }
+
+ kfree(conn_info);
+ rval = 0;
+
+update_conn_err:
+ return rval;
+}
+
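+ /*
+ * Derive the TCP MSS from the path MTU by subtracting the IP/TCP
+ * header overhead, plus VLAN and TCP timestamp option lengths when
+ * enabled; fall back to DEF_MSS if the result comes out as zero.
+ */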
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+ u16 mss = 0;
+ u16 hdrs = TCP_HDR_LEN;
+
+ if (is_ipv6)
+ hdrs += IPV6_HDR_LEN;
+ else
+ hdrs += IPV4_HDR_LEN;
+
+ if (vlan_en)
+ hdrs += VLAN_LEN;
+
+ mss = pmtu - hdrs;
+
+ if (tcp_ts_en)
+ mss -= TCP_OPTION_LEN;
+
+ if (!mss)
+ mss = DEF_MSS;
+
+ return mss;
+}
+
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+ struct qedi_ctx *qedi = qedi_ep->qedi;
+ struct qed_iscsi_params_offload *conn_info;
+ int rval;
+ int i;
+
+ conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate memory ep=%p\n", qedi_ep);
+ return -ENOMEM;
+ }
+
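+ /*
+ * Fill in the L2/L3/L4 parameters (MAC and IP addresses, ports, TCP
+ * options and timers) that the qed core needs to offload this TCP
+ * connection to the adapter.
+ */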
+ ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+ ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+ conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+ conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ conn_info->ip_version = 0;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ } else {
+ for (i = 1; i < 4; i++) {
+ conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+ conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+ }
+
+ conn_info->ip_version = 1;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+ qedi_ep->src_addr, qedi_ep->dst_addr);
+ }
+
+ conn_info->src.port = qedi_ep->src_port;
+ conn_info->dst.port = qedi_ep->dst_port;
+
+ conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+ conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+ conn_info->vlan_id = qedi_ep->vlan_id;
+
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+ SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+ conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
+ conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+ conn_info->dup_ack_theshold = 3;
+ conn_info->rcv_wnd = 65535;
+ conn_info->cwnd = DEF_MAX_CWND;
+
+ conn_info->ss_thresh = 65535;
+ conn_info->srtt = 300;
+ conn_info->rtt_var = 150;
+ conn_info->flow_label = 0;
+ conn_info->ka_timeout = DEF_KA_TIMEOUT;
+ conn_info->ka_interval = DEF_KA_INTERVAL;
+ conn_info->max_rt_time = DEF_MAX_RT_TIME;
+ conn_info->ttl = DEF_TTL;
+ conn_info->tos_or_tc = DEF_TOS;
+ conn_info->remote_port = qedi_ep->dst_port;
+ conn_info->local_port = qedi_ep->src_port;
+
+ conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+ (qedi_ep->ip_type == TCP_IPV6),
+ 1, (qedi_ep->vlan_id != 0));
+
+ conn_info->rcv_wnd_scale = 4;
+ conn_info->ts_ticks_per_second = 1000;
+ conn_info->da_timeout_value = 200;
+ conn_info->ack_frequency = 2;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Default cq index [%d], mss [%d]\n",
+ conn_info->default_cq, conn_info->mss);
+
+ rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+ rval, qedi_ep);
+
+ kfree(conn_info);
+ return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_ctx *qedi;
+ int rval;
+
+ qedi = qedi_conn->qedi;
+
+ rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "conn_start: FW oflload conn failed.\n");
+ rval = -EINVAL;
+ goto start_err;
+ }
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ qedi_conn->abrt_conn = 0;
+
+ rval = iscsi_conn_start(cls_conn);
+ if (rval) {
+ iscsi_conn_printk(KERN_ALERT, conn,
+ "iscsi_conn_start: FW oflload conn failed!!\n");
+ }
+
+start_err:
+ return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+
+ qedi_conn_free_login_resources(qedi, qedi_conn);
+ iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct qedi_endpoint *qedi_ep = ep->dd_data;
+ int len;
+
+ if (!qedi_ep)
+ return -ENOTCONN;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (qedi_ep->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+ break;
+ default:
+ return -ENOTCONN;
+ }
+
+ return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct qedi_ctx *qedi;
+ int len;
+
+ qedi = iscsi_host_priv(shost);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, qedi->mac, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "host%d\n", shost->host_no);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (qedi->ip_type == TCP_IPV4)
+ len = sprintf(buf, "%pI4\n", qedi->src_ip);
+ else
+ len = sprintf(buf, "%pI6\n", qedi->src_ip);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct qed_iscsi_stats iscsi_stats;
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi;
+
+ shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+ qedi = iscsi_host_priv(shost);
+ qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+ conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+ conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+ conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+ conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+ conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
+}
+
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+ struct iscsi_sge *bd_tbl;
+
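+ /*
+ * Point the request BD at the bytes staged in the generic PDU request
+ * buffer and the response BD at the full-sized response buffer.
+ */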
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+ bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+ qedi_conn->gen_pdu.req_buf;
+ bd_tbl->reserved0 = 0;
+ bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl->sge_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+ bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ bd_tbl->reserved0 = 0;
+}
+
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+ struct qedi_cmd *cmd = task->dd_data;
+ struct qedi_conn *qedi_conn = cmd->conn;
+ char *buf;
+ int data_len;
+ int rc = 0;
+
+ qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ qedi_send_iscsi_login(qedi_conn, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ data_len = qedi_conn->gen_pdu.req_buf_size;
+ buf = qedi_conn->gen_pdu.req_buf;
+ if (data_len)
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ buf, data_len, 1);
+ else
+ rc = qedi_send_iscsi_nopout(qedi_conn, task,
+ NULL, 0, 1);
+ break;
+ case ISCSI_OP_LOGOUT:
+ rc = qedi_send_iscsi_logout(qedi_conn, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ rc = qedi_iscsi_abort_work(qedi_conn, task);
+ break;
+ case ISCSI_OP_TEXT:
+ rc = qedi_send_iscsi_text(qedi_conn, task);
+ break;
+ default:
+ iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+ "unsupported op 0x%x\n", task->hdr->opcode);
+ }
+
+ return rc;
+}
+
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+
+ memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+ qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+ if (task->data_count) {
+ memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+ task->data_count);
+ qedi_conn->gen_pdu.req_wr_ptr =
+ qedi_conn->gen_pdu.req_buf + task->data_count;
+ }
+
+ cmd->conn = conn->dd_data;
+ cmd->scsi_cmd = NULL;
+ return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+ struct qedi_cmd *cmd = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+
+ cmd->state = 0;
+ cmd->task = NULL;
+ cmd->use_slowpath = false;
+ cmd->conn = qedi_conn;
+ cmd->task = task;
+ cmd->io_cmd_in_list = false;
+ INIT_LIST_HEAD(&cmd->io_cmd);
+
+ if (!sc)
+ return qedi_mtask_xmit(conn, task);
+
+ cmd->scsi_cmd = sc;
+ return qedi_iscsi_send_ioreq(task);
+}
+
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct qedi_ctx *qedi;
+ struct iscsi_endpoint *ep;
+ struct qedi_endpoint *qedi_ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct qed_dev *cdev = NULL;
+ struct qedi_uio_dev *udev = NULL;
+ struct iscsi_path path_req;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ u32 iscsi_cid = QEDI_CID_RESERVED;
+ u16 len = 0;
+ char *buf = NULL;
+ int ret;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ERR_PTR(ret);
+ }
+
+ if (do_not_recover) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ qedi = iscsi_host_priv(shost);
+ cdev = qedi->cdev;
+ udev = qedi->udev;
+
+ if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+ if (!ep) {
+ QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+ qedi_ep = ep->dd_data;
+ memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi_ep->iscsi_cid = (u32)-1;
+ qedi_ep->qedi = qedi;
+
+ if (dst_addr->sa_family == AF_INET) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+ sizeof(struct in_addr));
+ qedi_ep->dst_port = ntohs(addr->sin_port);
+ qedi_ep->ip_type = TCP_IPV4;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI4, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else if (dst_addr->sa_family == AF_INET6) {
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+ sizeof(struct in6_addr));
+ qedi_ep->dst_port = ntohs(addr6->sin6_port);
+ qedi_ep->ip_type = TCP_IPV6;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "dst_addr=%pI6, dst_port=%u\n",
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
+ }
+
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ ret = -ENXIO;
+ goto ep_conn_exit;
+ }
+
+ ret = qedi_alloc_sq(qedi, qedi_ep);
+ if (ret)
+ goto ep_conn_exit;
+
+ ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+ &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+ ret = -ENXIO;
+ goto ep_free_sq;
+ }
+
+ iscsi_cid = qedi_ep->handle;
+ qedi_ep->iscsi_cid = iscsi_cid;
+
+ init_waitqueue_head(&qedi_ep->ofld_wait);
+ init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+ qedi_ep->state = EP_STATE_OFLDCONN_START;
+ qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
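+ /*
+ * Hand path resolution to userspace: send an ISCSI_KEVENT_PATH_REQ
+ * for this destination; qedi_set_path() is called back with the
+ * resolved MAC/VLAN/MTU and schedules the connection offload.
+ */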
+ buf = (char *)&path_req;
+ len = sizeof(path_req);
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64)qedi_ep->iscsi_cid;
+ path_req.pmtu = qedi->ll2_mtu;
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ } else {
+ memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ }
+
+ ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+ len);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+ iscsi_cid, ret);
+ goto ep_rel_conn;
+ }
+
+ atomic_inc(&qedi->num_offloads);
+ return ep;
+
+ep_rel_conn:
+ qedi->ep_tbl[iscsi_cid] = NULL;
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+ ret);
+ep_free_sq:
+ qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(ret);
+}
+
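+ /*
+ * .ep_poll callback: returns 1 once the offload has completed, 0 while
+ * still waiting, and a negative value if the connection setup failed
+ * or the wait was interrupted.
+ */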
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+
+ if (do_not_recover)
+ return 1;
+
+ qedi_ep = ep->dd_data;
+ if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ return -1;
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+ ret = 1;
+
+ ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+ QEDI_OFLD_WAIT_STATE(qedi_ep),
+ msecs_to_jiffies(timeout_ms));
+
+ if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+ ret = -1;
+
+ if (ret > 0)
+ return 1;
+ else if (!ret)
+ return 0;
+ else
+ return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+ struct qedi_cmd *cmd, *cmd_tmp;
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+}
+
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct qedi_endpoint *qedi_ep;
+ struct qedi_conn *qedi_conn = NULL;
+ struct iscsi_conn *conn = NULL;
+ struct qedi_ctx *qedi;
+ int ret = 0;
+ int wait_delay = 20 * HZ;
+ int abrt_conn = 0;
+ int count = 10;
+
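+ /*
+ * Tear down the offloaded connection: wait for any in-flight firmware
+ * cleanup, pick a graceful or abortive (abrt_conn) destroy depending
+ * on outstanding commands, then release the connection handle and SQ.
+ */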
+ qedi_ep = ep->dd_data;
+ qedi = qedi_ep->qedi;
+
+ flush_work(&qedi_ep->offload_work);
+
+ if (qedi_ep->conn) {
+ qedi_conn = qedi_ep->conn;
+ conn = qedi_conn->cls_conn->dd_data;
+ iscsi_suspend_queue(conn);
+ abrt_conn = qedi_conn->abrt_conn;
+
+ while (count--) {
+ if (!test_bit(QEDI_CONN_FW_CLEANUP,
+ &qedi_conn->flags)) {
+ break;
+ }
+ msleep(1000);
+ }
+
+ if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ if (do_not_recover) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Do not recover cid=0x%x\n",
+ qedi_ep->iscsi_cid);
+ goto ep_exit_recover;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+ qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+ qedi_cleanup_active_cmd_list(qedi_conn);
+ goto ep_release_conn;
+ }
+ }
+
+ if (do_not_recover)
+ goto ep_exit_recover;
+
+ switch (qedi_ep->state) {
+ case EP_STATE_OFLDCONN_START:
+ goto ep_release_conn;
+ case EP_STATE_OFLDCONN_FAILED:
+ break;
+ case EP_STATE_OFLDCONN_COMPL:
+ if (unlikely(!qedi_conn))
+ break;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+ qedi_conn->active_cmd_count, abrt_conn,
+ qedi_ep->state,
+ qedi_ep->iscsi_cid,
+ qedi_ep->conn
+ );
+
+ if (!qedi_conn->active_cmd_count)
+ abrt_conn = 0;
+ else
+ abrt_conn = 1;
+
+ if (abrt_conn)
+ qedi_clearsq(qedi, qedi_conn, NULL);
+ break;
+ default:
+ break;
+ }
+
+ qedi_ep->state = EP_STATE_DISCONN_START;
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ if (ret) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "destroy_conn failed returned %d\n", ret);
+ } else {
+ ret = wait_event_interruptible_timeout(
+ qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state !=
+ EP_STATE_DISCONN_START),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+ ret, wait_delay, qedi_ep->iscsi_cid);
+ }
+ }
+
+ep_release_conn:
+ ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+ if (ret)
+ QEDI_WARN(&qedi->dbg_ctx,
+ "release_conn returned %d, cid=0x%x\n",
+ ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+ qedi_ep->state = EP_STATE_IDLE;
+ qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+ qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+ qedi_free_sq(qedi, qedi_ep);
+
+ if (qedi_conn)
+ qedi_conn->ep = NULL;
+
+ qedi_ep->conn = NULL;
+ qedi_ep->qedi = NULL;
+ atomic_dec(&qedi->num_offloads);
+
+ iscsi_destroy_endpoint(ep);
+}
+
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+ struct qed_dev *cdev = qedi->cdev;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct sk_buff *skb;
+ u32 len;
+ int rc = 0;
+
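+ /*
+ * Transmit a frame staged by the userspace uio data path: copy it out
+ * of the shared tx buffer into an skb and hand it to the qed LL2
+ * interface.
+ */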
+ udev = qedi->udev;
+ if (!udev) {
+ QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+ return -EINVAL;
+ }
+
+ uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+ if (!uctrl) {
+ QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+ return -EINVAL;
+ }
+
+ len = uctrl->host_tx_pkt_len;
+ if (!len) {
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+ return -EINVAL;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, udev->tx_pkt, len);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (vlanid)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+ rc = qedi_ops->ll2->start_xmit(cdev, skb);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+ rc);
+ kfree_skb(skb);
+ }
+
+ uctrl->host_tx_pkt_len = 0;
+ uctrl->hw_tx_cons++;
+
+ return rc;
+}
+
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 20 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
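+ /*
+ * iscsi_transport .set_path callback: either a transmit kick for the
+ * uio data path (QEDI_PATH_HANDLE) or the resolved path parameters
+ * (dst MAC, VLAN, MTU, src IP/port) for a pending endpoint, in which
+ * case the offload work is queued.
+ */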
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ int ret = 0;
+ u32 iscsi_cid;
+ u16 port_id = 0;
+
+ if (!shost) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost is NULL\n");
+ return ret;
+ }
+
+ if (strcmp(shost->hostt->proc_name, "qedi")) {
+ ret = -ENXIO;
+ QEDI_ERR(NULL, "shost %s is invalid\n",
+ shost->hostt->proc_name);
+ return ret;
+ }
+
+ qedi = iscsi_host_priv(shost);
+ if (path_data->handle == QEDI_PATH_HANDLE) {
+ ret = qedi_data_avail(qedi, path_data->vlan_id);
+ goto set_path_exit;
+ }
+
+ iscsi_cid = (u32)path_data->handle;
+ qedi_ep = qedi->ep_tbl[iscsi_cid];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+ if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+ QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ ret = -EIO;
+ goto set_path_exit;
+ }
+
+ ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+ ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+ qedi_ep->vlan_id = path_data->vlan_id;
+ if (path_data->pmtu < DEF_PATH_MTU) {
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "MTU cannot be %u, using default MTU %u\n",
+ path_data->pmtu, qedi_ep->pmtu);
+ }
+
+ if (path_data->pmtu != qedi->ll2_mtu) {
+ if (path_data->pmtu > JUMBO_MTU) {
+ ret = -EINVAL;
+ QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+ goto set_path_exit;
+ }
+
+ qedi_reset_host_mtu(qedi, path_data->pmtu);
+ qedi_ep->pmtu = qedi->ll2_mtu;
+ }
+
+ port_id = qedi_ep->src_port;
+ if (port_id >= QEDI_LOCAL_PORT_MIN &&
+ port_id < QEDI_LOCAL_PORT_MAX) {
+ if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+ port_id = 0;
+ } else {
+ port_id = 0;
+ }
+
+ if (!port_id) {
+ port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+ if (port_id == QEDI_LOCAL_PORT_INVALID) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to allocate port id for iscsi_cid=0x%x\n",
+ iscsi_cid);
+ ret = -ENOMEM;
+ goto set_path_exit;
+ }
+ }
+
+ qedi_ep->src_port = port_id;
+
+ if (qedi_ep->ip_type == TCP_IPV4) {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+ sizeof(struct in_addr));
+ qedi->ip_type = TCP_IPV4;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ } else {
+ memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+ sizeof(struct in6_addr));
+ qedi->ip_type = TCP_IPV6;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+ qedi_ep->src_addr, qedi_ep->src_port,
+ qedi_ep->dst_addr, qedi_ep->dst_port);
+ }
+
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+ ret = 0;
+
+set_path_exit:
+ return ret;
+}
+
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ return 0444;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_BOOT_ROOT:
+ case ISCSI_PARAM_BOOT_NIC:
+ case ISCSI_PARAM_BOOT_TARGET:
+ return 0444;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+ if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+ atomic_read(&task->refcount));
+ return;
+ }
+
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = QEDI_MODULE_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+ CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+ .create_session = qedi_session_create,
+ .destroy_session = qedi_session_destroy,
+ .create_conn = qedi_conn_create,
+ .bind_conn = qedi_conn_bind,
+ .start_conn = qedi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .destroy_conn = qedi_conn_destroy,
+ .set_param = iscsi_set_param,
+ .get_ep_param = qedi_ep_get_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = qedi_host_get_param,
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = qedi_conn_get_stats,
+ .xmit_task = qedi_task_xmit,
+ .cleanup_task = qedi_cleanup_task,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ .ep_connect = qedi_ep_connect,
+ .ep_poll = qedi_ep_poll,
+ .ep_disconnect = qedi_ep_disconnect,
+ .set_path = qedi_set_path,
+ .attr_is_visible = qedi_attr_is_visible,
+};
+
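+/* Force a connection failure so that open-iscsi starts session recovery. */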
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+ struct qedi_conn *qedi_conn)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
+ cls_sess = iscsi_conn_to_session(cls_conn);
+
+ if (iscsi_is_session_online(cls_sess)) {
+ qedi_conn->abrt_conn = 1;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failing connection, state=0x%x, cid=0x%x\n",
+ conn->session->state, qedi_conn->iscsi_conn_id);
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+ ISCSI_ERR_CONN_FAILED);
+ }
+}
+
+static const struct {
+ enum iscsi_error_types error_code;
+ char *err_string;
+} qedi_iscsi_error[] = {
+ { ISCSI_STATUS_NONE,
+ "tcp_error none"
+ },
+ { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ "task cid mismatch"
+ },
+ { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ "invalid task"
+ },
+ { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ "rq ring full"
+ },
+ { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ "cmdq ring full"
+ },
+ { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ "sge caching failed"
+ },
+ { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ "hdr digest error"
+ },
+ { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ "local cmpl error"
+ },
+	{ ISCSI_CONN_ERROR_DATA_OVERRUN,
+	  "data overrun"
+	},
+ { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ "out of sge error"
+ },
+ { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ "tcp seg ip options error"
+ },
+ { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ "tcp ip fragment error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ "AHS len protocol error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ "itt out of range error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ "data seg more than pdu size"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ "invalid opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ "invalid opcode before update"
+ },
+ { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ "unexpected opcode"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ "r2t carries no data"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ "data sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ "data TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ "r2t TTT error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ "buffer offset error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ "buffer offset ooo"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+	  "r2t sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ "data xer len error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ "data xer len1 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ "data xer len2 error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ "protocol lun error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ "f bit zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ "exp stat sn error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ "dsl not zero error"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ "invalid dsl"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ "data seg len too big"
+ },
+ { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ "outstanding r2t count error"
+ },
+ { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ "sense datalen error"
+ },
+};
+
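+/* Translate a firmware iSCSI error code into a printable string. */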
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+ int i;
+ char *msg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+ if (qedi_iscsi_error[i].error_code == err_code) {
+ msg = qedi_iscsi_error[i].err_string;
+ break;
+ }
+ }
+ return msg;
+}
+
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+ struct qedi_ctx *qedi;
+ char warn_notice[] = "iscsi_warning";
+ char error_notice[] = "iscsi_error";
+ char unknown_msg[] = "Unknown error";
+ char *message;
+ int need_recovery = 0;
+ u32 err_mask = 0;
+ char *msg;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ qedi = ep->qedi;
+
+ QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+ data->error_code);
+
+ if (err_mask) {
+ need_recovery = 0;
+ message = warn_notice;
+ } else {
+ need_recovery = 1;
+ message = error_notice;
+ }
+
+ msg = qedi_get_iscsi_error(data->error_code);
+ if (!msg) {
+ need_recovery = 0;
+ msg = unknown_msg;
+ }
+
+ iscsi_conn_printk(KERN_ALERT,
+ qedi_conn->cls_conn->dd_data,
+ "qedi: %s - %s\n", message, msg);
+
+ if (need_recovery)
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+ struct qedi_conn *qedi_conn;
+
+ if (!ep)
+ return;
+
+ qedi_conn = ep->conn;
+ if (!qedi_conn)
+ return;
+
+ QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+ data->error_code);
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644
index 000000000000..d3c06bbddb4e
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT 7200000
+#define DEF_KA_INTERVAL 10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 8000
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 2
+#define DEF_PATH_MTU 1500
+#define DEF_MSS 1460
+#define DEF_LL2_MTU 1560
+#define JUMBO_MTU 9000
+
+#define MIN_MTU 576 /* rfc 793 */
+#define IPV4_HDR_LEN 20
+#define IPV6_HDR_LEN 40
+#define TCP_HDR_LEN 20
+#define TCP_OPTION_LEN 12
+#define VLAN_LEN 4
+
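+/* Endpoint states used to track connection offload and teardown progress. */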
+enum {
+ EP_STATE_IDLE = 0x0,
+ EP_STATE_ACQRCONN_START = 0x1,
+ EP_STATE_ACQRCONN_COMPL = 0x2,
+ EP_STATE_OFLDCONN_START = 0x4,
+ EP_STATE_OFLDCONN_COMPL = 0x8,
+ EP_STATE_DISCONN_START = 0x10,
+ EP_STATE_DISCONN_COMPL = 0x20,
+ EP_STATE_CLEANUP_START = 0x40,
+ EP_STATE_CLEANUP_CMPL = 0x80,
+ EP_STATE_TCP_FIN_RCVD = 0x100,
+ EP_STATE_TCP_RST_RCVD = 0x200,
+ EP_STATE_LOGOUT_SENT = 0x400,
+ EP_STATE_LOGOUT_RESP_RCVD = 0x800,
+ EP_STATE_CLEANUP_FAILED = 0x1000,
+ EP_STATE_OFLDCONN_FAILED = 0x2000,
+ EP_STATE_CONNECT_FAILED = 0x4000,
+ EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+ struct qedi_ctx *qedi;
+ u32 dst_addr[4];
+ u32 src_addr[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ u16 pmtu;
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ u8 ip_type;
+ int state;
+ wait_queue_head_t ofld_wait;
+ wait_queue_head_t tcp_ofld_wait;
+ u32 iscsi_cid;
+ /* identifier of the connection from qed */
+ u32 handle;
+ u32 fw_cid;
+ void __iomem *p_doorbell;
+
+ /* Send queue management */
+ struct iscsi_wqe *sq;
+ dma_addr_t sq_dma;
+
+ u16 sq_prod_idx;
+ u16 fw_sq_prod_idx;
+ u16 sq_con_idx;
+ u32 sq_mem_size;
+
+ void *sq_pbl;
+ dma_addr_t sq_pbl_dma;
+ u32 sq_pbl_size;
+ struct qedi_conn *conn;
+ struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN 16
+
+struct qedi_io_bdt {
+ struct iscsi_sge *sge_tbl;
+ dma_addr_t sge_tbl_dma;
+ u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf: driver buffer used to stage payload associated with
+ * the login request
+ * @req_dma_addr: dma address for iscsi login request payload buffer
+ * @req_buf_size: actual login request payload length
+ * @req_wr_ptr: pointer into login request buffer when next data is
+ * to be written
+ * @resp_hdr: iscsi header where iscsi login response header is to
+ * be recreated
+ * @resp_buf: buffer to stage login response payload
+ * @resp_dma_addr: login response payload buffer dma address
+ * @resp_buf_size: login response payload length
+ * @resp_wr_ptr: pointer into login response buffer when next data is
+ * to be written
+ * @req_bd_tbl: iscsi login request payload BD table
+ * @req_bd_dma: login request BD table dma address
+ * @resp_bd_tbl: iscsi login response payload BD table
+ * @resp_bd_dma: login response BD table dma address
+ *
+ * The following structure defines buffer info for generic PDUs such as
+ * iSCSI Login, Logout and NOP.
+ */
+struct generic_pdu_resc {
+ char *req_buf;
+ dma_addr_t req_dma_addr;
+ u32 req_buf_size;
+ char *req_wr_ptr;
+ struct iscsi_hdr resp_hdr;
+ char *resp_buf;
+ dma_addr_t resp_dma_addr;
+ u32 resp_buf_size;
+ char *resp_wr_ptr;
+ char *req_bd_tbl;
+ dma_addr_t req_bd_dma;
+ char *resp_bd_tbl;
+ dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *ep;
+ struct list_head active_cmd_list;
+ spinlock_t list_lock; /* internal conn lock */
+ u32 active_cmd_count;
+ u32 cmd_cleanup_req;
+ u32 cmd_cleanup_cmpl;
+
+ u32 iscsi_conn_id;
+ int itt;
+ int abrt_conn;
+#define QEDI_CID_RESERVED 0x5AFF
+ u32 fw_cid;
+ /*
+ * Buffer for login negotiation process
+ */
+ struct generic_pdu_resc gen_pdu;
+
+ struct list_head tmf_work_list;
+ wait_queue_head_t wait_queue;
+ spinlock_t tmf_work_lock; /* tmf work lock */
+ unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP 1
+};
+
+struct qedi_cmd {
+ struct list_head io_cmd;
+ bool io_cmd_in_list;
+ struct iscsi_hdr hdr;
+ struct qedi_conn *conn;
+ struct scsi_cmnd *scsi_cmd;
+ struct scatterlist *sg;
+ struct qedi_io_bdt io_tbl;
+ struct iscsi_task_context request;
+ unsigned char *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ u16 task_id;
+
+ /* field populated for tmf work queue */
+ struct iscsi_task *task;
+ struct work_struct tmf_work;
+ int state;
+#define CLEANUP_WAIT 1
+#define CLEANUP_RECV 2
+#define CLEANUP_WAIT_FAILED 3
+#define CLEANUP_NOT_REQUIRED 4
+#define LUN_RESET_RESPONSE_RECEIVED 5
+#define RESPONSE_RECEIVED 6
+
+ int type;
+#define TYPEIO 1
+#define TYPERESET 2
+
+ struct qedi_work_map *list_tmf_work;
+ /* slowpath management */
+ bool use_slowpath;
+
+ struct iscsi_tm_rsp *tmf_resp_buf;
+ struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+ struct list_head list;
+ struct qedi_cmd *qedi_cmd;
+ int rtid;
+
+ int state;
+#define QEDI_WORK_QUEUED 1
+#define QEDI_WORK_SCHEDULED 2
+#define QEDI_WORK_EXIT 3
+
+ struct work_struct *ptr_tmf_work;
+};
+
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+ (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644
index 000000000000..5eda21d903e9
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -0,0 +1,2095 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+ " Enable logging of SCSI requests/completions into trace buffer. (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
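+/*
+ * Asynchronous event callback registered with qed: completes offload and
+ * terminate waiters and dispatches firmware iSCSI/TCP errors to the
+ * connection error handlers.
+ */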
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+ struct qedi_ctx *qedi;
+ struct qedi_endpoint *qedi_ep;
+ struct async_data *data;
+ int rval = 0;
+
+ if (!context || !fw_handle) {
+ QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+ return -EINVAL;
+ }
+
+ qedi = (struct qedi_ctx *)context;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+ data = (struct async_data *)fw_handle;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+ data->cid, data->itid, data->error_code,
+ data->fw_debug_param);
+
+ qedi_ep = qedi->ep_tbl[data->cid];
+
+ if (!qedi_ep) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Cannot process event, ep already disconnected, cid=0x%x\n",
+ data->cid);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ switch (fw_event_code) {
+ case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+ qedi_ep->state = EP_STATE_DISCONN_COMPL;
+ wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+ break;
+ case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+ qedi_process_iscsi_error(qedi_ep, data);
+ break;
+ case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+ case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+ case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+ qedi_process_tcp_error(qedi_ep, data);
+ break;
+ default:
+ QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+ fw_event_code);
+ }
+
+ return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (udev->uio_dev != -1)
+ return -EBUSY;
+
+ rtnl_lock();
+ udev->uio_dev = iminor(inode);
+ qedi_reset_uio_rings(udev);
+ set_bit(UIO_DEV_OPENED, &qedi->flags);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct qedi_uio_dev *udev = uinfo->priv;
+ struct qedi_ctx *qedi = udev->qedi;
+
+ udev->uio_dev = -1;
+ clear_bit(UIO_DEV_OPENED, &qedi->flags);
+ qedi_ll2_free_skbs(qedi);
+ return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+ if (udev->ll2_ring) {
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+ }
+
+ if (udev->ll2_buf) {
+ free_pages((unsigned long)udev->ll2_buf, 2);
+ udev->ll2_buf = NULL;
+ }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ uio_unregister_device(&udev->qedi_uinfo);
+
+ __qedi_free_uio_rings(udev);
+
+ pci_dev_put(udev->pdev);
+ kfree(udev->uctrl);
+ kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ list_del_init(&udev->list);
+ __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+ struct qedi_ctx *qedi = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+
+ qedi = udev->qedi;
+ uctrl = udev->uctrl;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ uctrl->host_rx_cons = 0;
+ uctrl->hw_rx_prod = 0;
+ uctrl->hw_rx_bd_prod = 0;
+ uctrl->host_rx_bd_cons = 0;
+
+ memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+ memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+ int rc = 0;
+
+ if (udev->ll2_ring || udev->ll2_buf)
+ return rc;
+
+ /* Allocating memory for LL2 ring */
+ udev->ll2_ring_size = QEDI_PAGE_SIZE;
+ udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+ if (!udev->ll2_ring) {
+ rc = -ENOMEM;
+ goto exit_alloc_ring;
+ }
+
+ /* Allocating memory for Tx/Rx pkt buffer */
+ udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+ udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+ udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+ __GFP_ZERO, 2);
+ if (!udev->ll2_buf) {
+ rc = -ENOMEM;
+ goto exit_alloc_buf;
+ }
+ return rc;
+
+exit_alloc_buf:
+ free_page((unsigned long)udev->ll2_ring);
+ udev->ll2_ring = NULL;
+exit_alloc_ring:
+ return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ int rc = 0;
+
+ list_for_each_entry(udev, &qedi_udev_list, list) {
+ if (udev->pdev == qedi->pdev) {
+ udev->qedi = qedi;
+ if (__qedi_alloc_uio_rings(udev)) {
+ udev->qedi = NULL;
+ return -ENOMEM;
+ }
+ qedi->udev = udev;
+ return 0;
+ }
+ }
+
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev) {
+ rc = -ENOMEM;
+ goto err_udev;
+ }
+
+ uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+ if (!uctrl) {
+ rc = -ENOMEM;
+ goto err_uctrl;
+ }
+
+ udev->uio_dev = -1;
+
+ udev->qedi = qedi;
+ udev->pdev = qedi->pdev;
+ udev->uctrl = uctrl;
+
+ rc = __qedi_alloc_uio_rings(udev);
+ if (rc)
+ goto err_uio_rings;
+
+ list_add(&udev->list, &qedi_udev_list);
+
+ pci_dev_get(udev->pdev);
+ qedi->udev = udev;
+
+ udev->tx_pkt = udev->ll2_buf;
+ udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+ return 0;
+
+ err_uio_rings:
+ kfree(uctrl);
+ err_uctrl:
+ kfree(udev);
+ err_udev:
+ return -ENOMEM;
+}
+
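+/*
+ * Expose the uio control block, LL2 ring and LL2 packet buffer to
+ * userspace (iscsiuio) as three UIO memory regions.
+ */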
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+ struct qedi_uio_dev *udev = qedi->udev;
+ struct uio_info *uinfo;
+ int ret = 0;
+
+ if (!udev)
+ return -ENOMEM;
+
+ uinfo = &udev->qedi_uinfo;
+
+ uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+ uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+ uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+ uinfo->mem[1].size = udev->ll2_ring_size;
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+ uinfo->mem[2].size = udev->ll2_buf_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->name = "qedi_uio";
+ uinfo->version = QEDI_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = qedi_uio_open;
+ uinfo->release = qedi_uio_close;
+
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
+
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO registration failed\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int ret;
+
+ sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(struct status_block), &sb_phys,
+ GFP_KERNEL);
+ if (!sb_virt) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
+ return -ENOMEM;
+ }
+
+ ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+ sb_id, QED_SB_TYPE_STORAGE);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Status block initialization failed for id = %d.\n",
+ sb_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+ struct qed_sb_info *sb_info;
+ int id;
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ sb_info = &qedi->sb_array[id];
+ if (sb_info->sb_virt)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt,
+ sb_info->sb_phys);
+ }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+ kfree(qedi->fp_array);
+ kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+ int ret = 0;
+
+ qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qedi_fastpath), GFP_KERNEL);
+ if (!qedi->fp_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath fp array allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+ sizeof(struct qed_sb_info), GFP_KERNEL);
+ if (!qedi->sb_array) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "fastpath sb array allocation failed.\n");
+ ret = -ENOMEM;
+ goto free_fp;
+ }
+
+ return ret;
+
+free_fp:
+ qedi_free_fp(qedi);
+ return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id;
+
+ memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->fp_array));
+ memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+ sizeof(*qedi->sb_array));
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ fp->sb_info = &qedi->sb_array[id];
+ fp->sb_id = id;
+ fp->qedi = qedi;
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ "qedi", id);
+
+		/* fp_array[i] is passed as the IRQ cookie, so initialize
+		 * here any per-fastpath data needed in interrupt context.
+		 */
+ }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+ struct qedi_fastpath *fp;
+ int id, ret = 0;
+
+ ret = qedi_alloc_fp(qedi);
+ if (ret)
+ goto err;
+
+ qedi_int_fp(qedi);
+
+ for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+ fp = &qedi->fp_array[id];
+ ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "SB allocation and initialization failed.\n");
+ ret = -EIO;
+ goto err_init;
+ }
+ }
+
+ return 0;
+
+err_init:
+ qedi_free_sb(qedi);
+ qedi_free_fp(qedi);
+err:
+ return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+ int i;
+
+ qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+ sizeof(u32), GFP_KERNEL);
+ if (!qedi->cid_que.cid_que_base)
+ return -ENOMEM;
+
+ qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+ sizeof(struct qedi_conn *),
+ GFP_KERNEL);
+ if (!qedi->cid_que.conn_cid_tbl) {
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+ return -ENOMEM;
+ }
+
+ qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+ qedi->cid_que.cid_q_prod_idx = 0;
+ qedi->cid_que.cid_q_cons_idx = 0;
+ qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+ qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi->cid_que.cid_que[i] = i;
+ qedi->cid_que.conn_cid_tbl[i] = NULL;
+ }
+
+ return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+ kfree(qedi->cid_que.cid_que_base);
+ qedi->cid_que.cid_que_base = NULL;
+
+ kfree(qedi->cid_que.conn_cid_tbl);
+ qedi->cid_que.conn_cid_tbl = NULL;
+}
+
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+ u16 start_id, u16 next)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = next;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
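+/* Allocate the next free local port id from the bitmap, round-robin. */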
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+ u16 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = QEDI_LOCAL_PORT_INVALID;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = QEDI_LOCAL_PORT_INVALID;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+ if (id == QEDI_LOCAL_PORT_INVALID)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+ kfree(qedi->ep_tbl);
+ qedi->ep_tbl = NULL;
+ qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+ u16 port_id;
+
+ qedi->ep_tbl = kzalloc((qedi->max_active_conns *
+ sizeof(struct qedi_endpoint *)), GFP_KERNEL);
+ if (!qedi->ep_tbl)
+ return -ENOMEM;
+ port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+ QEDI_LOCAL_PORT_MIN, port_id)) {
+ qedi_cm_free_mem(qedi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct qedi_ctx *qedi = NULL;
+
+ shost = iscsi_host_alloc(&qedi_host_template,
+ sizeof(struct qedi_ctx), 0);
+ if (!shost) {
+ QEDI_ERR(NULL, "Could not allocate shost\n");
+ goto exit_setup_shost;
+ }
+
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_channel = 0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = 16;
+ shost->transportt = qedi_scsi_transport;
+
+ qedi = iscsi_host_priv(shost);
+ memset(qedi, 0, sizeof(*qedi));
+ qedi->shost = shost;
+ qedi->dbg_ctx.host_no = shost->host_no;
+ qedi->pdev = pdev;
+ qedi->dbg_ctx.pdev = pdev;
+ qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+ qedi->max_sqes = QEDI_SQ_SIZE;
+
+ if (shost_use_blk_mq(shost))
+ shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+ return qedi;
+}
+
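+/*
+ * LL2 receive callback from qed: queue the skb for the LL2 receive
+ * thread, which copies it into the iscsiuio shared buffer.
+ */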
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+ struct qedi_uio_dev *udev;
+ struct qedi_uio_ctrl *uctrl;
+ struct skb_work_list *work;
+ u32 prod;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+ "UIO DEV is not opened\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate work so dropping frame.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&work->list);
+ work->skb = skb;
+
+ if (skb_vlan_tag_present(skb))
+ work->vlan_id = skb_vlan_tag_get(skb);
+
+ if (work->vlan_id)
+ __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_add_tail(&work->list, &qedi->ll2_skb_list);
+
+ ++uctrl->hw_rx_prod_cnt;
+ prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+ if (prod != uctrl->host_rx_cons) {
+ uctrl->hw_rx_prod = prod;
+ spin_unlock_bh(&qedi->ll2_lock);
+ wake_up_process(qedi->ll2_recv_thread);
+ return 0;
+ }
+
+ spin_unlock_bh(&qedi->ll2_lock);
+ return 0;
+}
+
+/* Copy this skb into the iscsiuio mmapped region */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+ u16 vlan_id)
+{
+ struct qedi_uio_dev *udev = NULL;
+ struct qedi_uio_ctrl *uctrl = NULL;
+ struct qedi_rx_bd rxbd;
+ struct qedi_rx_bd *p_rxbd;
+ u32 rx_bd_prod;
+ void *pkt;
+ int len = 0;
+
+ if (!qedi) {
+ QEDI_ERR(NULL, "qedi is NULL\n");
+ return -1;
+ }
+
+ udev = qedi->udev;
+ uctrl = udev->uctrl;
+ pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+ len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+ memcpy(pkt, skb->data, len);
+
+ memset(&rxbd, 0, sizeof(rxbd));
+ rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+ rxbd.rx_pkt_len = len;
+ rxbd.vlan_id = vlan_id;
+
+ uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+ rx_bd_prod = uctrl->hw_rx_bd_prod;
+ p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+ p_rxbd += rx_bd_prod;
+
+ memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+ /* notify the iscsiuio about new packet */
+ uio_event_notify(&udev->qedi_uinfo);
+
+ return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+ struct skb_work_list *work, *work_tmp;
+
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+ list_del(&work->list);
+ if (work->skb)
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+ struct skb_work_list *work, *work_tmp;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&qedi->ll2_lock);
+ list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+ list) {
+ list_del(&work->list);
+ qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+ kfree_skb(work->skb);
+ kfree(work);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&qedi->ll2_lock);
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ u8 num_sq_pages;
+ u32 log_page_size;
+ int rval = 0;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+ MIN_NUM_CPUS_MSIX(qedi));
+
+ num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+ qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+ memset(&qedi->pf_params.iscsi_pf_params, 0,
+ sizeof(qedi->pf_params.iscsi_pf_params));
+
+ qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+ &qedi->hw_p_cpuq);
+ if (!qedi->p_cpuq) {
+ QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ rval = qedi_alloc_global_queues(qedi);
+ if (rval) {
+ QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+ rval = -1;
+ goto err_alloc_mem;
+ }
+
+ qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+ qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+ qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+ qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+ qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
+	for (log_page_size = 0; log_page_size < 32; log_page_size++) {
+ if ((1 << log_page_size) == PAGE_SIZE)
+ break;
+ }
+ qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+ qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+ (u64)qedi->hw_p_cpuq;
+
+ /* RQ BDQ initializations.
+ * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+ * rqe_log_size: 8 for 256B RQE
+ */
+ qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+ /* BDQ address and size */
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_dma;
+ qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ qedi->bdq_pbl_list_num_entries;
+ qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+ /* cq_num_entries: num_tasks + rq_num_entries */
+ qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+ qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+ qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+ qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+ return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+ size_t size = 0;
+
+ if (qedi->p_cpuq) {
+ size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+ pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ qedi->hw_p_cpuq);
+ }
+
+ qedi_free_global_queues(qedi);
+
+ kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+ if (link->link_up) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_UP);
+ } else {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Link Down event.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+ {
+ .link_update = qedi_link_update,
+ }
+};
+
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+ u16 que_idx, struct qedi_percpu_s *p)
+{
+ struct qedi_work *qedi_work;
+ struct qedi_conn *q_conn;
+ struct iscsi_conn *conn;
+ struct qedi_cmd *qedi_cmd;
+ u32 iscsi_cid;
+ int rc = 0;
+
+ iscsi_cid = cqe->cqe_common.conn_id;
+ q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!q_conn) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Session no longer exists for cid=0x%x!!\n",
+ iscsi_cid);
+ return -1;
+ }
+ conn = q_conn->cls_conn->dd_data;
+
+ switch (cqe->cqe_common.cqe_type) {
+ case ISCSI_CQE_TYPE_SOLICITED:
+ case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+ qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+ if (!qedi_cmd) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+ qedi_cmd->cqe_work.qedi = qedi;
+ memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_cmd->cqe_work.que_idx = que_idx;
+ qedi_cmd->cqe_work.is_solicited = true;
+ list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+ break;
+ case ISCSI_CQE_TYPE_UNSOLICITED:
+ case ISCSI_CQE_TYPE_DUMMY:
+ case ISCSI_CQE_TYPE_TASK_CLEANUP:
+ qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+ if (!qedi_work) {
+ rc = -1;
+ break;
+ }
+ INIT_LIST_HEAD(&qedi_work->list);
+ qedi_work->qedi = qedi;
+ memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+ qedi_work->que_idx = que_idx;
+ qedi_work->is_solicited = false;
+ list_add_tail(&qedi_work->list, &p->work_list);
+ break;
+ default:
+ rc = -1;
+ QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+ }
+ return rc;
+}
+
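+/*
+ * Drain new CQEs from the global CQ associated with this status block
+ * and hand them to the per-CPU I/O thread for processing.
+ */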
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ struct qedi_percpu_s *p = NULL;
+ struct global_queue *que;
+ u16 prod_idx;
+ unsigned long flags;
+ union iscsi_cqe *cqe;
+ int cpu;
+ int ret;
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ que = qedi->global_queues[fp->sb_id];
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+ que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+ qedi->intr_cpu = fp->sb_id;
+ cpu = smp_processor_id();
+ p = &per_cpu(qedi_percpu, cpu);
+
+ if (unlikely(!p->iothread))
+ WARN_ON(1);
+
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (que->cq_cons_idx != prod_idx) {
+ cqe = &que->cq[que->cq_cons_idx];
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+ "cqe=%p prod_idx=%d cons_idx=%d.\n",
+ cqe, prod_idx, que->cq_cons_idx);
+
+ ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+ if (ret)
+ continue;
+
+ que->cq_cons_idx++;
+ if (que->cq_cons_idx == QEDI_CQ_SIZE)
+ que->cq_cons_idx = 0;
+ }
+ wake_up_process(p->iothread);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+ struct qedi_ctx *qedi = fp->qedi;
+ struct global_queue *que;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ u16 prod_idx;
+
+ barrier();
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedi->global_queues[fp->sb_id];
+
+ /* prod idx wrap around uint16 */
+ if (prod_idx >= QEDI_CQ_SIZE)
+ prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+ return (que->cq_cons_idx != prod_idx);
+}
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+ struct qedi_fastpath *fp = dev_id;
+ struct qedi_ctx *qedi = fp->qedi;
+ bool wake_io_thread = true;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+ wake_io_thread = qedi_process_completions(fp);
+ if (wake_io_thread) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "process already running\n");
+ }
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* Check for more work */
+ rmb();
+
+ if (qedi_fp_has_work(fp) == 0)
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ else
+ goto process_again;
+
+ return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+ /* Cookie is qedi_ctx struct */
+ struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+ QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM 0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->int_info.msix_cnt) {
+ for (i = 0; i < qedi->int_info.used_cnt; i++) {
+ synchronize_irq(qedi->int_info.msix[i].vector);
+ irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ NULL);
+ free_irq(qedi->int_info.msix[i].vector,
+ &qedi->fp_array[i]);
+ }
+ } else {
+ qedi_ops->common->simd_handler_clean(qedi->cdev,
+ QEDI_SIMD_HANDLER_NUM);
+ }
+
+ qedi->int_info.used_cnt = 0;
+ qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+ int i, rc, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ rc = request_irq(qedi->int_info.msix[i].vector,
+ qedi_msix_handler, 0, "qedi",
+ &qedi->fp_array[i]);
+
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+ qedi_sync_free_irqs(qedi);
+ return rc;
+ }
+ qedi->int_info.used_cnt++;
+ rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+ int rc = 0;
+
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+ if (rc)
+ goto exit_setup_int;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+		  "msix_cnt = 0x%x, number of CPUs = 0x%x\n",
+ qedi->int_info.msix_cnt, num_online_cpus());
+
+ if (qedi->int_info.msix_cnt) {
+ rc = qedi_request_msix_irq(qedi);
+ goto exit_setup_int;
+ } else {
+ qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+ QEDI_SIMD_HANDLER_NUM,
+ qedi_simd_int_handler);
+ qedi->int_info.used_cnt = 1;
+ }
+
+exit_setup_int:
+ return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+
+ if (qedi->bdq_pbl_list)
+ dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+ if (qedi->bdq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+ qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ if (qedi->bdq[i].buf_addr) {
+ dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+ qedi->bdq[i].buf_addr,
+ qedi->bdq[i].buf_dma);
+ }
+ }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+ int i;
+ struct global_queue **gl = qedi->global_queues;
+
+ for (i = 0; i < qedi->num_queues; i++) {
+ if (!gl[i])
+ continue;
+
+ if (gl[i]->cq)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+ gl[i]->cq, gl[i]->cq_dma);
+ if (gl[i]->cq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+ gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+ kfree(gl[i]);
+ }
+ qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+ int i;
+ struct scsi_bd *pbl;
+ u64 *list;
+ dma_addr_t page;
+
+ /* Alloc dma memory for BDQ buffers */
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ qedi->bdq[i].buf_addr =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_BDQ_BUF_SIZE,
+ &qedi->bdq[i].buf_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq[i].buf_addr) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate BDQ buffer %d.\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Alloc dma memory for BDQ page buffer list */
+ qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+ qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+ qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+ qedi->rq_num_entries);
+
+ qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->bdq_pbl_mem_size,
+ &qedi->bdq_pbl_dma, GFP_KERNEL);
+ if (!qedi->bdq_pbl) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Populate BDQ PBL with physical and virtual address of individual
+ * BDQ buffers
+ */
+ pbl = (struct scsi_bd *)qedi->bdq_pbl;
+ for (i = 0; i < QEDI_BDQ_NUM; i++) {
+ pbl->address.hi =
+ cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+ pbl->address.lo =
+ cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+ pbl, pbl->address.hi, pbl->address.lo, i);
+ pbl->opaque.hi = 0;
+ pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl++;
+ }
+
+ /* Allocate list of PBL pages */
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
+ if (!qedi->bdq_pbl_list) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate list of PBL pages.\n");
+ return -ENOMEM;
+ }
+ memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+ /*
+ * Now populate PBL list with pages that contain pointers to the
+ * individual buffers.
+ */
+ qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+ list = (u64 *)qedi->bdq_pbl_list;
+ page = qedi->bdq_pbl_list_dma;
+ for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+ *list = qedi->bdq_pbl_dma;
+ list++;
+ page += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+ u32 *list;
+ int i;
+ int status = 0, rc;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /*
+ * Number of global queues (CQ / RQ). This should
+ * be <= number of available MSIX vectors for the PF
+ */
+ if (!qedi->num_queues) {
+ QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+ return 1;
+ }
+
+ /* Make sure we allocated the PBL that will contain the physical
+ * addresses of our queues
+ */
+ if (!qedi->p_cpuq) {
+ status = 1;
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
+ qedi->num_queues), GFP_KERNEL);
+ if (!qedi->global_queues) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate global queues array ptr memory\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "qedi->global_queues=%p.\n", qedi->global_queues);
+
+ /* Allocate DMA coherent buffers for BDQ */
+	rc = qedi_alloc_bdq(qedi);
+	if (rc) {
+		status = rc;
+		goto mem_alloc_failure;
+	}
+
+ /* Allocate a CQ and an associated PBL for each MSI-X
+ * vector.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ qedi->global_queues[i] =
+ kzalloc(sizeof(*qedi->global_queues[0]),
+ GFP_KERNEL);
+ if (!qedi->global_queues[i]) {
+ QEDI_ERR(&qedi->dbg_ctx,
+				 "Unable to allocate global queue %d.\n", i);
+ goto mem_alloc_failure;
+ }
+
+ qedi->global_queues[i]->cq_mem_size =
+ (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+ qedi->global_queues[i]->cq_mem_size =
+ (qedi->global_queues[i]->cq_mem_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE) * sizeof(void *);
+ qedi->global_queues[i]->cq_pbl_size =
+ (qedi->global_queues[i]->cq_pbl_size +
+ (QEDI_PAGE_SIZE - 1));
+
+ qedi->global_queues[i]->cq =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq, 0,
+ qedi->global_queues[i]->cq_mem_size);
+
+ qedi->global_queues[i]->cq_pbl =
+ dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
+
+ if (!qedi->global_queues[i]->cq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate cq PBL.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedi->global_queues[i]->cq_pbl, 0,
+ qedi->global_queues[i]->cq_pbl_size);
+
+ /* Create PBL */
+ num_pages = qedi->global_queues[i]->cq_mem_size /
+ QEDI_PAGE_SIZE;
+ page = qedi->global_queues[i]->cq_dma;
+ pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+ }
+
+ list = (u32 *)qedi->p_cpuq;
+
+ /*
+ * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+ * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
+ * to the physical address which contains an array of pointers to the
+ * physical addresses of the specific queue pages.
+ */
+ for (i = 0; i < qedi->num_queues; i++) {
+ *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+ list++;
+ *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+ list++;
+
+ *list = (u32)0;
+ list++;
+ *list = (u32)((u64)0 >> 32);
+ list++;
+ }
+
+ return 0;
+
+mem_alloc_failure:
+ qedi_free_global_queues(qedi);
+ return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ int rval = 0;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ if (!ep)
+ return -EIO;
+
+ /* Calculate appropriate queue and PBL sizes */
+ ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+ ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+ ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+ ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
+ if (!ep->sq) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue.\n");
+ rval = -ENOMEM;
+ goto out;
+ }
+ memset(ep->sq, 0, ep->sq_mem_size);
+
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
+ if (!ep->sq_pbl) {
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Could not allocate send queue PBL.\n");
+ rval = -ENOMEM;
+ goto out_free_sq;
+ }
+ memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+ /* Create PBL */
+ num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+ page = ep->sq_dma;
+ pbl = (u32 *)ep->sq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += QEDI_PAGE_SIZE;
+ }
+
+ return rval;
+
+out_free_sq:
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+out:
+ return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+ if (ep->sq_pbl)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+ ep->sq_pbl_dma);
+ if (ep->sq)
+ dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+ ep->sq_dma);
+}
+
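+/* Allocate a free firmware task context index from the bitmap. */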
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+ s16 tmp_idx;
+
+again:
+ tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+ MAX_ISCSI_TASK_ENTRIES);
+
+ if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+ QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+ tmp_idx = -1;
+ goto err_idx;
+ }
+
+ if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+ goto again;
+
+err_idx:
+ return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+ if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "FW task context, already cleared, tid=0x%x\n", idx);
+ WARN_ON(1);
+ }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+ struct qedi_cmd *cmd)
+{
+ qedi->itt_map[tid].itt = proto_itt;
+ qedi->itt_map[tid].p_cmd = cmd;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+ qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+ u16 i;
+
+ for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+ if (qedi->itt_map[i].itt == itt) {
+ *tid = i;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Ref itt=0x%x, found at tid=0x%x\n",
+ itt, *tid);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+ *proto_itt = qedi->itt_map[tid].itt;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+ "Get itt map tid [0x%x with proto itt[0x%x]",
+ tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+ struct qedi_cmd *cmd = NULL;
+
+	if (tid >= MAX_ISCSI_TASK_ENTRIES)
+ return NULL;
+
+ cmd = qedi->itt_map[tid].p_cmd;
+ if (cmd->task_id != tid)
+ return NULL;
+
+ qedi->itt_map[tid].p_cmd = NULL;
+
+ return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+ qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+ sizeof(struct qedi_itt_map), GFP_KERNEL);
+ if (!qedi->itt_map) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to allocate itt map array memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+ kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+ .rx_cb = qedi_ll2_rx,
+ .tx_cb = NULL,
+};
+
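+/* Per-CPU kernel thread that processes queued CQE work items. */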
+static int qedi_percpu_io_thread(void *arg)
+{
+ struct qedi_percpu_s *p = arg;
+ struct qedi_work *work, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+ cond_resched();
+ spin_lock_irqsave(&p->p_work_lock, flags);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static int qedi_cpu_online(unsigned int cpu)
+{
+ struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
+ struct task_struct *thread;
+
+ thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "qedi_thread/%d", cpu);
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ return 0;
+}
+
+static int qedi_cpu_offline(unsigned int cpu)
+{
+ struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
+ struct qedi_work *work, *tmp;
+ struct task_struct *thread;
+
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ qedi_fp_process_cqes(work);
+ if (!work->is_solicited)
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+ return 0;
+}
+
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+ struct qed_ll2_params params;
+
+ qedi_recover_all_conns(qedi);
+
+ qedi_ops->ll2->stop(qedi->cdev);
+ qedi_ll2_free_skbs(qedi);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+ qedi->ll2_mtu, mtu);
+ memset(&params, 0, sizeof(params));
+ qedi->ll2_mtu = mtu;
+ params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
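+/*
+ * Common teardown path for PCI remove and recovery: stop the worker
+ * threads, interrupts, LL2 and the qed device, then free driver
+ * resources.
+ */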
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
+
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ qedi_sync_free_irqs(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->stop(qedi->cdev);
+ qedi_ops->ll2->stop(qedi->cdev);
+ }
+
+ if (mode == QEDI_MODE_NORMAL)
+ qedi_free_iscsi_pf_param(qedi);
+
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+ qedi_ops->common->remove(qedi->cdev);
+ }
+
+ qedi_destroy_fp(qedi);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ qedi_release_cid_que(qedi);
+ qedi_cm_free_mem(qedi);
+ qedi_free_uio(qedi->udev);
+ qedi_free_itt(qedi);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
+
+ if (qedi->ll2_recv_thread) {
+ kthread_stop(qedi->ll2_recv_thread);
+ qedi->ll2_recv_thread = NULL;
+ }
+ qedi_ll2_free_skbs(qedi);
+ }
+}
+
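+/*
+ * Common bring-up path for PCI probe and recovery: probe the qed device,
+ * allocate fastpath and global queues, start the slowpath, interrupts and
+ * the LL2 path, then start the qed iSCSI function.
+ */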
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+ struct qedi_ctx *qedi;
+ struct qed_ll2_params params;
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+ bool is_vf = false;
+ char host_buf[16];
+ struct qed_link_params link_params;
+ struct qed_slowpath_params sp_params;
+ struct qed_probe_params qed_params;
+ void *task_start, *task_end;
+ int rc;
+ u16 tmp;
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi = qedi_host_alloc(pdev);
+ if (!qedi) {
+ rc = -ENOMEM;
+ goto exit_probe;
+ }
+ } else {
+ qedi = pci_get_drvdata(pdev);
+ }
+
+ memset(&qed_params, 0, sizeof(qed_params));
+ qed_params.protocol = QED_PROTOCOL_ISCSI;
+ qed_params.dp_module = dp_module;
+ qed_params.dp_level = dp_level;
+ qed_params.is_vf = is_vf;
+ qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+ if (!qedi->cdev) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+ goto free_host;
+ }
+
+ qedi->msix_count = MAX_NUM_MSIX_PF;
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
+ }
+ }
+
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_prepare_fp(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+ goto free_pf_params;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+ sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+ sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+ sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+ strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+ rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+ goto stop_hw;
+ }
+
+ /* update_pf_params needs to be called before and after slowpath
+ * start
+ */
+ qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+ rc = qedi_setup_int(qedi);
+ if (rc)
+ goto stop_iscsi_func;
+
+ qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+ /* Learn information crucial for qedi to progress */
+ rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+ if (rc)
+ goto stop_iscsi_func;
+
+ /* Record BDQ producer doorbell addresses */
+ qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+ qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "BDQ primary_prod=%p secondary_prod=%p.\n",
+ qedi->bdq_primary_prod,
+ qedi->bdq_secondary_prod);
+
+ /*
+ * We need to write the number of BDs in the BDQ we've preallocated so
+ * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+ * packet arrives.
+ */
+ qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Writing %d to primary and secondary BDQ doorbell registers.\n",
+ qedi->bdq_prod_idx);
+ writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+ tmp = readw(qedi->bdq_primary_prod);
+ writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+ tmp = readw(qedi->bdq_secondary_prod);
+
+ ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+ qedi->mac);
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+ qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+ memset(&params, 0, sizeof(params));
+ params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+ qedi->ll2_mtu = DEF_PATH_MTU;
+ params.drop_ttl0_packets = 0;
+ params.rx_vlan_stripping = 1;
+ ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ /* set up rx path */
+ INIT_LIST_HEAD(&qedi->ll2_skb_list);
+ spin_lock_init(&qedi->ll2_lock);
+ /* start qedi context */
+ spin_lock_init(&qedi->hba_lock);
+ spin_lock_init(&qedi->task_idx_lock);
+ }
+ qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+ qedi_ops->ll2->start(qedi->cdev, &params);
+
+ if (mode != QEDI_MODE_RECOVERY) {
+ qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+ (void *)qedi,
+ "qedi_ll2_thread");
+ }
+
+ rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+ qedi, qedi_iscsi_event_cb);
+ if (rc) {
+ rc = -ENODEV;
+ QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+ goto stop_slowpath;
+ }
+
+ task_start = qedi_get_task_mem(&qedi->tasks, 0);
+ task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+ "Task context start=%p, end=%p block_size=%u.\n",
+ task_start, task_end, qedi->tasks.size);
+
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+ if (rc) {
+ QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+ &qedi_dbg_fops);
+#endif
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+ QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+ if (mode == QEDI_MODE_NORMAL) {
+ if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not add iscsi host\n");
+ rc = -ENOMEM;
+ goto remove_host;
+ }
+
+ /* Allocate uio buffers */
+ rc = qedi_alloc_uio_rings(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO alloc ring failed err=%d\n", rc);
+ goto remove_host;
+ }
+
+ rc = qedi_init_uio(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "UIO init failed, err=%d\n", rc);
+ goto free_uio;
+ }
+
+ /* Set up the CID queue; the array is hosted on iscsi_conn */
+ rc = qedi_setup_cid_que(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not setup cid que\n");
+ goto free_uio;
+ }
+
+ rc = qedi_cm_alloc_mem(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc cm memory\n");
+ goto free_cid_que;
+ }
+
+ rc = qedi_alloc_itt(qedi);
+ if (rc) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not alloc itt memory\n");
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ if (!qedi->tmf_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start tmf thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+ qedi->offload_thread = create_workqueue(host_buf);
+ if (!qedi->offload_thread) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Unable to start offload thread!\n");
+ rc = -ENODEV;
+ goto free_cid_que;
+ }
+
+ /* F/w needs 1st task context memory entry for performance */
+ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+ atomic_set(&qedi->num_offloads, 0);
+ }
+
+ return 0;
+
+free_cid_que:
+ qedi_release_cid_que(qedi);
+free_uio:
+ qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+ iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+ qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+ qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+ qedi_free_iscsi_pf_param(qedi);
+free_host:
+ iscsi_host_free(qedi->shost);
+exit_probe:
+ return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+ __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static struct pci_device_id qedi_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static enum cpuhp_state qedi_cpuhp_state;
+
+static struct pci_driver qedi_pci_driver = {
+ .name = QEDI_MODULE_NAME,
+ .id_table = qedi_pci_tbl,
+ .probe = qedi_probe,
+ .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+ struct qedi_percpu_s *p;
+ int cpu, rc = 0;
+
+ qedi_ops = qed_get_iscsi_ops();
+ if (!qedi_ops) {
+ QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_init("qedi");
+#endif
+
+ qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+ if (!qedi_scsi_transport) {
+ QEDI_ERR(NULL, "Could not register qedi transport");
+ rc = -ENOMEM;
+ goto exit_qedi_init_1;
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(qedi_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
+ qedi_cpu_online, qedi_cpu_offline);
+ if (rc < 0)
+ goto exit_qedi_init_2;
+ qedi_cpuhp_state = rc;
+
+ rc = pci_register_driver(&qedi_pci_driver);
+ if (rc) {
+ QEDI_ERR(NULL, "Failed to register driver\n");
+ goto exit_qedi_hp;
+ }
+
+ return 0;
+
+exit_qedi_hp:
+ cpuhp_remove_state(qedi_cpuhp_state);
+exit_qedi_init_2:
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+ return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+ pci_unregister_driver(&qedi_pci_driver);
+ cpuhp_remove_state(qedi_cpuhp_state);
+ iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+ qedi_dbg_exit();
+#endif
+ qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
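
A minimal sketch of the per-CPU kthread plus CPU-hotplug pattern that qedi_init(), qedi_cpu_online() and qedi_cpu_offline() implement above; all demo_* names are hypothetical, and the real driver also drains its per-CPU work list before stopping a thread.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/err.h>

struct demo_percpu {
	struct task_struct *iothread;
};

static DEFINE_PER_CPU(struct demo_percpu, demo_percpu);
static enum cpuhp_state demo_hp_state;

static int demo_io_thread(void *arg)
{
	/* placeholder loop; a real thread would drain a per-CPU work list */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_cpu_online(unsigned int cpu)
{
	struct demo_percpu *p = per_cpu_ptr(&demo_percpu, cpu);
	struct task_struct *t;

	t = kthread_create_on_node(demo_io_thread, p, cpu_to_node(cpu),
				   "demo_thread/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);		/* pin the worker to its CPU */
	p->iothread = t;
	wake_up_process(t);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	struct demo_percpu *p = per_cpu_ptr(&demo_percpu, cpu);

	if (p->iothread) {
		kthread_stop(p->iothread);
		p->iothread = NULL;
	}
	return 0;
}

static int __init demo_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				    demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;
	demo_hp_state = ret;	/* dynamic state id, needed for removal */
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
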
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644
index 000000000000..b10c48bd1428
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+ if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+ return sprintf(buf, "Online\n");
+ else
+ return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+ struct qed_link_output if_link;
+
+ qedi_ops->common->get_link(qedi->cdev, &if_link);
+
+ return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+ &dev_attr_port_state,
+ &dev_attr_speed,
+ NULL
+};
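
A sketch of how a shost attribute array like qedi_shost_attrs[] above is normally hooked up; the qedi host template lives outside this hunk, so the template below and its field values are assumptions, not the driver's actual settings.

#include <linux/module.h>
#include <scsi/scsi_host.h>

extern struct device_attribute *qedi_shost_attrs[];

/* hypothetical template: only the attribute wiring is the point here */
static struct scsi_host_template demo_sht = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.shost_attrs	= qedi_shost_attrs,	/* port_state and speed appear
						 * under /sys/class/scsi_host/hostN/ */
};
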
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644
index 000000000000..9543a1b139d4
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER 8
+#define QEDI_DRIVER_MINOR_VER 10
+#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c901f7..47eb4d545d13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
- uint16_t options = 0;
int cnt;
struct req_que *req = ha->req_q_map[0];
+ struct qla_qpair *qpair;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
- if (ha->flags.cpu_affinity_enabled) {
- req = ha->req_q_map[1];
- ql_dbg(ql_dbg_multiq, vha, 0xc000,
- "Request queue %p attached with "
- "VP[%d], cpu affinity =%d\n",
- req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
- goto vport_queue;
- } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+ if (!ql2xmqsupport || !ha->npiv_info)
goto vport_queue;
+
/* Create a request queue in QoS mode for the vport */
for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
- qos);
- if (!ret)
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
- "Can't create request queue for VP[%d]\n",
+ "Can't create qpair for VP[%d]\n",
vha->vp_idx);
else {
ql_dbg(ql_dbg_multiq, vha, 0xc001,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
+ "Queue pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
ql_dbg(ql_dbg_user, vha, 0x7085,
- "Request Que:%d Q0s: %d) created for VP[%d]\n",
- ret, qos, vha->vp_idx);
- req = ha->req_q_map[ret];
+ "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+ qpair->id, qos, vha->vp_idx);
+ req = qpair->req;
+ vha->qpair = qpair;
}
}
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
- if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+ if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
- "Queue delete failed.\n");
+ "Queue Pair delete failed.\n");
}
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34ddc432..21d9fb7fc887 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0191 | 0x0146 |
+ * | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
@@ -58,7 +58,7 @@
* | | | 0xb13a,0xb142 |
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
- * | MultiQ | 0xc00c | |
+ * | MultiQ | 0xc010 | |
* | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5236e3f2a06a..f7df01b76714 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -401,6 +401,7 @@ typedef struct srb {
uint16_t type;
char *name;
int iocbs;
+ struct qla_qpair *qpair;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*start_scsi_mq) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
-#define QLA_MSIX_RSP_Q 0x01
+#define QLA_MSIX_DEFAULT 0x00
+#define QLA_MSIX_RSP_Q 0x01
+#define QLA_ATIO_VECTOR 0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
struct qla_msix_entry {
int have_irq;
+ int in_use;
uint32_t vector;
uint16_t entry;
- struct rsp_que *rsp;
+ char name[30];
+ void *handle;
struct irq_affinity_notify irq_notify;
int cpuid;
};
@@ -2872,7 +2878,6 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
- struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+/*Queue pair data structure */
+struct qla_qpair {
+ spinlock_t qp_lock;
+ atomic_t ref_count;
+ /* distill these fields down to 'online=0/1'
+ * ha->flags.eeh_busy
+ * ha->flags.pci_channel_io_perm_failure
+ * base_vha->loop_state
+ */
+ uint32_t online:1;
+ /* move vha->flags.difdix_supported here */
+ uint32_t difdix_supported:1;
+ uint32_t delete_in_progress:1;
+
+ uint16_t id; /* qp number used with FW */
+ uint16_t num_active_cmd; /* cmds down at firmware */
+ cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+ uint16_t vp_idx; /* vport ID */
+
+ mempool_t *srb_mempool;
+
+ /* to do: New driver: move queues to here instead of pointers */
+ struct req_que *req;
+ struct rsp_que *rsp;
+ struct atio_que *atio;
+ struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+ struct qla_hw_data *hw;
+ struct work_struct q_work;
+ struct list_head qp_list_elem; /* vha->qp_list */
+};
+
/* Place holder for FW buffer parameters */
struct qlfc_fw {
void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
uint32_t chip_reset_done :1;
uint32_t running_gold_fw :1;
uint32_t eeh_busy :1;
- uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
uint32_t isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
uint8_t mqenable;
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
+ struct qla_qpair **queue_pair_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+ / sizeof(unsigned long)];
uint8_t max_req_queues;
uint8_t max_rsp_queues;
+ uint8_t max_qpairs;
+ struct qla_qpair *base_qpair;
struct qla_npiv_entry *npiv_info;
uint16_t nvram_npiv_size;
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
struct mutex vport_lock; /* Virtual port synchronization */
spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+ struct mutex mq_lock; /* multi-queue synchronization */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
+ uint32_t qpairs_available:1;
} flags;
atomic_t loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
#define FX00_TARGET_SCAN 24
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
+#define QPAIR_ONLINE_CHECK_NEEDED 27
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
/* List of pending PLOGI acks, protected by hw lock */
struct list_head plogi_ack_list;
+ struct list_head qp_list;
+
uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
+ struct qla_qpair *qpair; /* base qpair */
unsigned long vp_flags;
#define VP_IDX_ACQUIRED 0 /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
scsi_qla_host_t *vha;
};
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for book keeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
/*
* Macros to help code, maintain, etc.
*/
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
- atomic_inc(&__vha->vref_count); \
- mb(); \
- if (__vha->flags.delete_progress) { \
- atomic_dec(&__vha->vref_count); \
- __bail = 1; \
- } else { \
- __bail = 0; \
- } \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
- atomic_dec(&__vha->vref_count); \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+ atomic_dec(&__vha->vref_count); \
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
+ atomic_inc(&__qpair->ref_count); \
+ mb(); \
+ if (__qpair->delete_in_progress) { \
+ atomic_dec(&__qpair->ref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
} while (0)
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
+ atomic_dec(&__qpair->ref_count); \
+
/*
* qla2x00 local function return status codes
*/
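
A usage sketch for the QLA_QPAIR_MARK_BUSY/QLA_QPAIR_MARK_NOT_BUSY pair added above: I/O paths bump ref_count and bail out once delete_in_progress is set, which is what lets qla2xxx_delete_qpair() (added in the qla_init.c hunk below) wait for ref_count to drain. demo_do_io() is hypothetical and assumes qla_def.h is in scope.

static int demo_do_io(struct qla_qpair *qpair)
{
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);	/* ref_count++, bail if delete started */
	if (bail)
		return QLA_FUNCTION_FAILED;

	/* ... queue work on qpair->req under qpair->qp_lock ... */

	QLA_QPAIR_MARK_NOT_BUSY(qpair);		/* ref_count--, unblocks deletion */
	return QLA_SUCCESS;
}
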
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c51d9f3359e3..afa0116a163b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -91,12 +91,17 @@ extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+ int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
/*
* Global Data in qla_os.c source file.
*/
extern char qla2x00_version_str[];
+extern struct kmem_cache *srb_cachep;
+
extern int ql2xlogintimeout;
extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
/*
* Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+ uint16_t, struct req_que *);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+ struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Global function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+ struct qla_msix_entry *, int);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
uint16_t, int, uint8_t);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int);
+ uint16_t, struct qla_qpair *);
+
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296b46a3..632d5f30386a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
if (req->outstanding_cmds)
return QLA_SUCCESS;
- if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
- (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ if (!IS_FWI2_CAPABLE(ha))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
struct req_que *req;
struct rsp_que *rsp;
- if (vha->hw->flags.cpu_affinity_enabled)
- req = vha->hw->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
rsp = req->rsp;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
rsp = req->rsp;
if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+{
+ int rsp_id = 0;
+ int req_id = 0;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t qpair_id = 0;
+ struct qla_qpair *qpair = NULL;
+ struct qla_msix_entry *msix;
+
+ if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+ ql_log(ql_log_warn, vha, 0x0181,
+ "FW/Driver is not multi-queue capable.\n");
+ return NULL;
+ }
+
+ if (ql2xmqsupport) {
+ qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate memory for queue pair.\n");
+ return NULL;
+ }
+ memset(qpair, 0, sizeof(struct qla_qpair));
+
+ qpair->hw = vha->hw;
+
+ /* Assign available que pair id */
+ mutex_lock(&ha->mq_lock);
+ qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+ if (qpair_id >= ha->max_qpairs) {
+ mutex_unlock(&ha->mq_lock);
+ ql_log(ql_log_warn, vha, 0x0183,
+ "No resources to create additional q pair.\n");
+ goto fail_qid_map;
+ }
+ set_bit(qpair_id, ha->qpair_qid_map);
+ ha->queue_pair_map[qpair_id] = qpair;
+ qpair->id = qpair_id;
+ qpair->vp_idx = vp_idx;
+
+ for (i = 0; i < ha->msix_count; i++) {
+ msix = &ha->msix_entries[i];
+ if (msix->in_use)
+ continue;
+ qpair->msix = msix;
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+ "Vector %x selected for qpair\n", msix->vector);
+ break;
+ }
+ if (!qpair->msix) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Out of MSI-X vectors!.\n");
+ goto fail_msix;
+ }
+
+ qpair->msix->in_use = 1;
+ list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+ mutex_unlock(&ha->mq_lock);
+
+ /* Create response queue first */
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ if (!rsp_id) {
+ ql_log(ql_log_warn, vha, 0x0185,
+ "Failed to create response queue.\n");
+ goto fail_rsp;
+ }
+
+ qpair->rsp = ha->rsp_q_map[rsp_id];
+
+ /* Create request queue */
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ if (!req_id) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to create request queue.\n");
+ goto fail_req;
+ }
+
+ qpair->req = ha->req_q_map[req_id];
+ qpair->rsp->req = qpair->req;
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4)
+ qpair->difdix_supported = 1;
+ }
+
+ qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!qpair->srb_mempool) {
+ ql_log(ql_log_warn, vha, 0x0191,
+ "Failed to create srb mempool for qpair %d\n",
+ qpair->id);
+ goto fail_mempool;
+ }
+
+ /* Mark as online */
+ qpair->online = 1;
+
+ if (!vha->flags.qpairs_available)
+ vha->flags.qpairs_available = 1;
+
+ ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ ql_dbg(ql_dbg_init, vha, 0x0187,
+ "Request/Response queue pair created, id %d\n",
+ qpair->id);
+ }
+ return qpair;
+
+fail_mempool:
+fail_req:
+ qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+ mutex_lock(&ha->mq_lock);
+ qpair->msix->in_use = 0;
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+fail_msix:
+ ha->queue_pair_map[qpair_id] = NULL;
+ clear_bit(qpair_id, ha->qpair_qid_map);
+ mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+ kfree(qpair);
+ return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+ int ret;
+ struct qla_hw_data *ha = qpair->hw;
+
+ qpair->delete_in_progress = 1;
+ while (atomic_read(&qpair->ref_count))
+ msleep(500);
+
+ ret = qla25xx_delete_req_que(vha, qpair->req);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+ ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+ if (ret != QLA_SUCCESS)
+ goto fail;
+
+ mutex_lock(&ha->mq_lock);
+ ha->queue_pair_map[qpair->id] = NULL;
+ clear_bit(qpair->id, ha->qpair_qid_map);
+ list_del(&qpair->qp_list_elem);
+ if (list_empty(&vha->qp_list))
+ vha->flags.qpairs_available = 0;
+ mempool_destroy(qpair->srb_mempool);
+ kfree(qpair);
+ mutex_unlock(&ha->mq_lock);
+
+ return QLA_SUCCESS;
+fail:
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index edc48f3b8230..44e404583c86 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -216,6 +216,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
}
static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ uint8_t bail;
+
+ QLA_QPAIR_MARK_BUSY(qpair, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(qpair->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+ mempool_free(sp, qpair->srb_mempool);
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
+static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 221ad8907893..58e49a3e1de8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -12,7 +12,6 @@
#include <scsi/scsi_tcq.h>
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
return (cont_pkt);
}
-static inline int
+inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct req_que *req)
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
}
}
-struct qla2_sgx {
- dma_addr_t dma_addr; /* OUT */
- uint32_t dma_len; /* OUT */
-
- uint32_t tot_bytes; /* IN */
- struct scatterlist *cur_sg; /* IN */
-
- /* for book keeping, bzero on initial invocation */
- uint32_t bytes_consumed;
- uint32_t num_bytes;
- uint32_t tot_partial;
-
- /* for debugging */
- uint32_t num_sg;
- srb_t *sp;
-};
-
-static int
+int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
uint32_t *partial)
{
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline int
+inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where completion should happen */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
}
/* Setup device pointers. */
-
- qla25xx_set_que(sp, &rsp);
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_type_7 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- int affinity = cmd->request->cpu;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ cmd_pkt->task = TSK_SIMPLE;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+ struct qla_qpair *qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Check for host side state */
+ if (!qpair->online) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ if (!qpair->difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ cmd->result = DID_NO_CONNECT << 16;
+ return QLA_INTERFACE_ERROR;
+ }
+
+ /* Only process protection or >16 cdb in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+ if (cmd->cmd_len <= 16)
+ return qla2xxx_start_scsi_mq(sp);
+ }
+
+ /* Setup qpair pointers */
+ rsp = qpair->rsp;
+ req = qpair->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
- affinity < ha->max_rsp_queues - 1)
- *rsp = ha->rsp_q_map[affinity + 1];
- else
- *rsp = ha->rsp_q_map[0];
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ cmd_pkt->timeout = cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
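
Both of the new submission paths above, qla2xxx_start_scsi_mq() and qla2xxx_dif_start_scsi_mq(), repeat the same free-slot computation against the request ring. A compact restatement of that arithmetic follows; demo_ring_space() is a hypothetical helper (assumes linux/types.h), with in playing the role of req->ring_index and out the hardware out pointer.

/* free IOCB slots between producer (in) and hardware consumer (out);
 * callers still keep "+ 2" slots of headroom before queuing */
static inline uint16_t demo_ring_space(uint16_t ring_len, uint16_t in, uint16_t out)
{
	if (in < out)
		return out - in;		/* free region is contiguous */
	return ring_len - (in - out);		/* free region wraps past the end */
}
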
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 19f18485a854..5093ca9b02ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
@@ -2871,41 +2872,6 @@ out:
}
static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
- struct qla_hw_data *ha;
- scsi_qla_host_t *vha;
- struct rsp_que *rsp;
- struct device_reg_24xx __iomem *reg;
- unsigned long flags;
- uint32_t hccr = 0;
-
- rsp = (struct rsp_que *) dev_id;
- if (!rsp) {
- ql_log(ql_log_info, NULL, 0x505b,
- "%s: NULL response queue pointer.\n", __func__);
- return IRQ_NONE;
- }
- ha = rsp->hw;
- vha = pci_get_drvdata(ha->pdev);
-
- /* Clear the interrupt, if enabled, for this response queue */
- if (!ha->flags.disable_msix_handshake) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
- if (qla2x00_check_reg32_for_disconnect(vha, hccr))
- goto out;
- queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
scsi_qla_host_t *vha;
@@ -3001,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (unlikely(!ha->flags.disable_msix_handshake)) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
@@ -3008,69 +3003,28 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
{ "qla2xxx (default)", qla24xx_msix_default },
{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+ { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
{ "qla2xxx (default)", qla82xx_msix_default },
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
- int i;
- struct qla_msix_entry *qentry;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
- for (i = 0; i < ha->msix_count; i++) {
- qentry = &ha->msix_entries[i];
- if (qentry->have_irq) {
- /* un-register irq cpu affinity notification */
- irq_set_affinity_notifier(qentry->vector, NULL);
- free_irq(qentry->vector, qentry->rsp);
- }
- }
- pci_disable_msix(ha->pdev);
- kfree(ha->msix_entries);
- ha->msix_entries = NULL;
- ha->flags.msix_enabled = 0;
- ql_dbg(ql_dbg_init, vha, 0x0042,
- "Disabled the MSI.\n");
-}
-
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR 2
int i, ret;
- struct msix_entry *entries;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
- if (!entries) {
- ql_log(ql_log_warn, vha, 0x00bc,
- "Failed to allocate memory for msix_entry.\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < ha->msix_count; i++)
- entries[i].entry = i;
-
- ret = pci_enable_msix_range(ha->pdev,
- entries, MIN_MSIX_COUNT, ha->msix_count);
+ ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3080,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
} else if (ret < ha->msix_count) {
ql_log(ql_log_warn, vha, 0x00c6,
"MSI-X: Failed to enable support "
- "-- %d/%d\n Retry with %d vectors.\n",
- ha->msix_count, ret, ret);
+ "with %d vectors, using %d vectors.\n",
+ ha->msix_count, ret);
ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
+ /* Recalculate queue values */
+ if (ha->mqiobase && ql2xmqsupport) {
+ ha->max_req_queues = ha->msix_count - 1;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
+ }
}
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
@@ -3097,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
+ qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->entry = i;
qentry->have_irq = 0;
- qentry->rsp = NULL;
+ qentry->in_use = 0;
+ qentry->handle = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
qentry = &ha->msix_entries[i];
- qentry->rsp = rsp;
+ qentry->handle = rsp;
rsp->msix = qentry;
+ scnprintf(qentry->name, sizeof(qentry->name),
+ "%s", msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3122,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
+ qentry->in_use = 1;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3141,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* queue.
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- qentry = &ha->msix_entries[ATIO_VECTOR];
- qentry->rsp = rsp;
+ qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
+ qentry->handle = rsp;
+ scnprintf(qentry->name, sizeof(qentry->name),
+ "%s", msix_entries[QLA_ATIO_VECTOR].name);
+ qentry->in_use = 1;
ret = request_irq(qentry->vector,
- qla83xx_msix_entries[ATIO_VECTOR].handler,
- 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ msix_entries[QLA_ATIO_VECTOR].handler,
+ 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
}
@@ -3155,7 +3129,7 @@ msix_register_fail:
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
- qla24xx_disable_msix(ha);
+ qla2x00_free_irqs(vha);
ha->mqenable = 0;
goto msix_out;
}
@@ -3163,11 +3137,13 @@ msix_register_fail:
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
} else
- if (ha->mqiobase
- && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ if (ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3177,7 +3153,6 @@ msix_register_fail:
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
- kfree(entries);
return ret;
}
@@ -3230,7 +3205,7 @@ skip_msix:
!IS_QLA27XX(ha))
goto skip_msi;
- ret = pci_enable_msi(ha->pdev);
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
if (!ret) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
@@ -3275,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct rsp_que *rsp;
+ struct qla_msix_entry *qentry;
+ int i;
/*
* We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
return;
rsp = ha->rsp_q_map[0];
- if (ha->flags.msix_enabled)
- qla24xx_disable_msix(ha);
- else if (ha->flags.msi_enabled) {
- free_irq(ha->pdev->irq, rsp);
- pci_disable_msi(ha->pdev);
- } else
- free_irq(ha->pdev->irq, rsp);
-}
+ if (ha->flags.msix_enabled) {
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ if (qentry->have_irq) {
+ irq_set_affinity_notifier(qentry->vector, NULL);
+ free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+ }
+ }
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled MSI-X.\n");
+ } else {
+ free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+ }
+ pci_free_irq_vectors(ha->pdev);
+}
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+ struct qla_msix_entry *msix, int vector_type)
{
- struct qla_hw_data *ha = rsp->hw;
- struct qla_init_msix_entry *intr = &msix_entries[2];
- struct qla_msix_entry *msix = rsp->msix;
+ struct qla_init_msix_entry *intr = &msix_entries[vector_type];
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
- ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ scnprintf(msix->name, sizeof(msix->name),
+ "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+ ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
if (ret) {
ql_log(ql_log_fatal, vha, 0x00e6,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3310,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
return ret;
}
msix->have_irq = 1;
- msix->rsp = rsp;
+ msix->handle = qpair;
return ret;
}
@@ -3323,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
+ struct rsp_que *rsp = e->handle;
/* user is recommended to set mask to just 1 cpu */
e->cpuid = cpumask_first(mask);
- ha = e->rsp->hw;
+ ha = rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
- struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+ struct rsp_que *rsp = e->handle;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d \n", __func__,
+ "%s: host%ld: vector %d cpu %d\n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}
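
The qla_isr.c changes above move the driver from pci_enable_msix_range() with manual msix_entry bookkeeping to the pci_alloc_irq_vectors() API. A stripped-down sketch of that allocation/teardown shape follows; the demo_* names are hypothetical and the real driver keeps its qla_msix_entry state around these calls.

#include <linux/pci.h>
#include <linux/interrupt.h>

static int demo_setup_vectors(struct pci_dev *pdev, int min_vecs, int max_vecs,
			      irq_handler_t handler, void *data)
{
	int nvec, i, ret;

	/* may return fewer than max_vecs; PCI_IRQ_AFFINITY spreads them */
	nvec = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "demo", data);
		if (ret)
			goto unwind;
	}
	return nvec;

unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return ret;
}
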
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c998699..2819ceb96041 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,43 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+struct rom_cmd {
+ uint16_t cmd;
+} rom_cmds[] = {
+ { MBC_LOAD_RAM },
+ { MBC_EXECUTE_FIRMWARE },
+ { MBC_READ_RAM_WORD },
+ { MBC_MAILBOX_REGISTER_TEST },
+ { MBC_VERIFY_CHECKSUM },
+ { MBC_GET_FIRMWARE_VERSION },
+ { MBC_LOAD_RISC_RAM },
+ { MBC_DUMP_RISC_RAM },
+ { MBC_LOAD_RISC_RAM_EXTENDED },
+ { MBC_DUMP_RISC_RAM_EXTENDED },
+ { MBC_WRITE_RAM_WORD_EXTENDED },
+ { MBC_READ_RAM_EXTENDED },
+ { MBC_GET_RESOURCE_COUNTS },
+ { MBC_SET_FIRMWARE_OPTION },
+ { MBC_MID_INITIALIZE_FIRMWARE },
+ { MBC_GET_FIRMWARE_STATE },
+ { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+ { MBC_GET_RETRY_COUNT },
+ { MBC_TRACE_CONTROL },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+ int i;
+ struct rom_cmd *wc;
+
+ for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+ wc = rom_cmds + i;
+ if (wc->cmd == cmd)
+ return 1;
+ }
+
+ return 0;
+}
/*
* qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ /* check if ISP abort is active and return cmd with timeout */
+ if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) {
+ ql_log(ql_log_info, vha, 0x1005,
+ "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ wait_time = jiffies;
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (time_after(jiffies, wait_time + 5 * HZ))
+ ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+ command, jiffies_to_msecs(jiffies - wait_time));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+ else
+ req = vha->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- req = ha->req_q_map[0];
+ if (vha->vp_idx && vha->qpair)
+ req = vha->qpair->req;
else
- req = vha->req;
+ req = ha->req_q_map[0];
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
- if (ql2xmaxqueues > 1)
- req = ha->req_q_map[0];
- else
- req = vha->req;
+ req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ struct qla_qpair *qpair;
vha = fcport->vha;
ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
"Entered %s.\n", __func__);
- if (ha->flags.cpu_affinity_enabled)
- rsp = ha->rsp_q_map[tag + 1];
- else
+ if (vha->vp_idx && vha->qpair) {
+ /* NPIV port */
+ qpair = vha->qpair;
+ rsp = qpair->rsp;
+ req = qpair->req;
+ } else {
rsp = req->rsp;
+ }
+
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
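The wait_time/time_after() hunk in qla2x00_mailbox_command() above adds a warning for mailbox commands that complete but take unusually long. A hedged sketch of that pattern with placeholder names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void my_wait_and_warn(struct completion *done, unsigned long tov)
{
        unsigned long start = jiffies;

        if (!wait_for_completion_timeout(done, tov * HZ))
                pr_warn("command timed out\n");

        /* Even when the completion arrives, flag anything slower than 5s. */
        if (time_after(jiffies, start + 5 * HZ))
                pr_warn("command waited %u msecs\n",
                        jiffies_to_msecs(jiffies - start));
}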
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..c6d6f0d912ff 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
- free_irq(rsp->msix->vector, rsp);
+ free_irq(rsp->msix->vector, rsp->msix->handle);
rsp->msix->have_irq = 0;
- rsp->msix->rsp = NULL;
+ rsp->msix->in_use = 0;
+ rsp->msix->handle = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
return ret;
}
-static int
+int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair, *tqpair;
- /* Delete request queues */
- for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
- req = ha->req_q_map[cnt];
- if (req && test_bit(cnt, ha->req_qid_map)) {
- ret = qla25xx_delete_req_que(vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00ea,
- "Couldn't delete req que %d.\n",
- req->id);
- return ret;
+ if (ql2xmqsupport) {
+ list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+ qp_list_elem)
+ qla2xxx_delete_qpair(vha, qpair);
+ } else {
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
+ return ret;
+ }
}
}
- }
- /* Delete response queues */
- for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
- rsp = ha->rsp_q_map[cnt];
- if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
- ret = qla25xx_delete_rsp_que(vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x00eb,
- "Couldn't delete rsp que %d.\n",
- rsp->id);
- return ret;
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
+ return ret;
+ }
}
}
}
+
return ret;
}
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS)
goto que_failed;
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req->out_ptr = (void *)(req->ring + req->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
@@ -741,20 +750,20 @@ failed:
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
- struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+ struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
struct scsi_qla_host *vha;
- struct qla_hw_data *ha = rsp->hw;
+ struct qla_hw_data *ha = qpair->hw;
- spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ spin_lock_irqsave(&qpair->qp_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- qla24xx_process_response_queue(vha, rsp);
- spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int req)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
{
int ret = 0;
struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
- if (ha->flags.msix_enabled)
- rsp->msix = &ha->msix_entries[que_id + 1];
- else
- ql_log(ql_log_warn, base_vha, 0x00e3,
- "MSIX not enabled.\n");
+ rsp->msix = qpair->msix;
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
- "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
+ /* Set option to indicate response queue creation */
+ options |= BIT_1;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
- "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
- ret = qla25xx_request_irq(rsp);
+ ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
if (ret)
goto que_failed;
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
- mutex_lock(&ha->vport_lock);
+ mutex_lock(&ha->mq_lock);
clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->vport_lock);
+ mutex_unlock(&ha->mq_lock);
goto que_failed;
}
- if (req >= 0)
- rsp->req = ha->req_q_map[req];
- else
- rsp->req = NULL;
+ rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
- if (rsp->hw->wq)
- INIT_WORK(&rsp->q_work, qla_do_work);
+ if (qpair->hw->wq)
+ INIT_WORK(&qpair->q_work, qla_do_work);
return rsp->id;
que_failed:
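qla25xx_delete_queues() above now tears down queue pairs with list_for_each_entry_safe(), which is required because each element is removed and freed while the list is being walked. A generic sketch; struct my_qpair and the helper are placeholders, not driver code.

#include <linux/list.h>
#include <linux/slab.h>

struct my_qpair {
        struct list_head node;
};

static void my_delete_all(struct list_head *head)
{
        struct my_qpair *qp, *tmp;

        /* The _safe variant caches the next pointer, so qp can be freed here. */
        list_for_each_entry_safe(qp, tmp, head, node) {
                list_del(&qp->node);
                kfree(qp);
        }
}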
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a5..8521cfe302e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
/*
* SRB allocation cache
*/
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
/*
* CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
- "Enables MQ settings "
- "Default is 1 for single queue. Set it to number "
- "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
- "Enables CPU affinity settings for the driver "
- "Default is 0 for no affinity of request and response IO. "
- "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+ "Enable on demand multiple queue pairs support "
+ "Default is 1 for supported. "
+ "Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
+ .map_queues = qla2xxx_map_queues,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+
+ if (ql2xmqsupport && ha->max_qpairs) {
+ ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->queue_pair_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for queue pair ptrs.\n");
+ goto fail_qpair_map;
+ }
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x0182,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ }
+
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
+fail_base_qpair:
+ kfree(ha->queue_pair_map);
+fail_qpair_map:
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
fail_rsp_map:
kfree(ha->req_q_map);
ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
struct req_que *req;
struct rsp_que *rsp;
int cnt;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
continue;
req = ha->req_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->req_q_map[cnt] = NULL;
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_req_que(ha, req);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
kfree(ha->req_q_map);
ha->req_q_map = NULL;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
if (!test_bit(cnt, ha->rsp_qid_map))
continue;
rsp = ha->rsp_q_map[cnt];
+ clear_bit(cnt, ha->req_qid_map);
+ ha->rsp_q_map[cnt] = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- kfree(ha->rsp_q_map);
- ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
- uint16_t options = 0;
- int ques, req, ret;
- struct qla_hw_data *ha = vha->hw;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (!(ha->fw_attributes & BIT_6)) {
- ql_log(ql_log_warn, vha, 0x00d8,
- "Firmware is not multi-queue capable.\n");
- goto fail;
- }
- if (ql2xmultique_tag) {
- /* create a request queue for IO */
- options |= BIT_7;
- req = qla25xx_create_req_que(ha, options, 0, 0, -1,
- QLA_DEFAULT_QUE_QOS);
- if (!req) {
- ql_log(ql_log_warn, vha, 0x00e0,
- "Failed to create request queue.\n");
- goto fail;
- }
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- vha->req = ha->req_q_map[req];
- options |= BIT_1;
- for (ques = 1; ques < ha->max_rsp_queues; ques++) {
- ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
- if (!ret) {
- ql_log(ql_log_warn, vha, 0x00e8,
- "Failed to create response queue.\n");
- goto fail2;
- }
- }
- ha->flags.cpu_affinity_enabled = 1;
- ql_dbg(ql_dbg_multiq, vha, 0xc007,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "CPU affinity mode enabled, "
- "no. of response queues:%d no. of request queues:%d.\n",
- ha->max_rsp_queues, ha->max_req_queues);
- }
- return 0;
-fail2:
- qla25xx_delete_queues(vha);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- vha->req = ha->req_q_map[0];
-fail:
- ha->mqenable = 0;
- kfree(ha->req_q_map);
kfree(ha->rsp_q_map);
- ha->max_req_queues = ha->max_rsp_queues = 1;
- return 1;
+ ha->rsp_q_map = NULL;
}
static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
-static void
+void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
+
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp, NULL);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ }
+
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ WARN_ON(atomic_read(&sp->ref_count) == 0);
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ cmd->scsi_done(cmd);
+}
+
/* If we are SP1 here, we need to still take and release the host_lock as SP1
* does not have the changes necessary to avoid taking host->host_lock.
*/
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
+ struct qla_qpair *qpair = NULL;
+ uint32_t tag;
+ uint16_t hwq;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
+ if (ha->mqenable) {
+ if (shost_use_blk_mq(vha->host)) {
+ tag = blk_mq_unique_tag(cmd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ qpair = ha->queue_pair_map[hwq];
+ } else if (vha->vp_idx && vha->qpair) {
+ qpair = vha->qpair;
+ }
+
+ if (qpair)
+ return qla2xxx_mqueuecommand(host, cmd, qpair);
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
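The queuecommand hunk above routes a command to a queue pair by decoding the blk-mq hardware queue index from the request's unique tag. A minimal sketch of just that lookup (my_hwq_for_cmd is an assumed name):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static u16 my_hwq_for_cmd(struct scsi_cmnd *cmd)
{
        /* The unique tag encodes both the hw queue index and the per-queue tag. */
        u32 tag = blk_mq_unique_tag(cmd->request);

        return blk_mq_unique_tag_to_hwq(tag);
}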
@@ -808,6 +876,95 @@ qc24_fail_command:
return 0;
}
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ struct qla_qpair *qpair)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ cmd->result = rval;
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
+ goto qc24_fail_command;
+ }
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+ goto qc24_target_busy;
+ }
+
+ /*
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto qc24_host_busy;
+
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2xxx_qpair_sp_free_dma;
+ sp->done = qla2xxx_qpair_sp_compl;
+ sp->qpair = qpair;
+
+ rval = ha->isp_ops->start_scsi_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ if (rval == QLA_INTERFACE_ERROR)
+ goto qc24_fail_command;
+ goto qc24_host_busy_free_sp;
+ }
+
+ return 0;
+
+qc24_host_busy_free_sp:
+ qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+ cmd->scsi_done(cmd);
+
+ return 0;
+}
+
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
"MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
+ ha->msix_count = msix + 1;
/* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+ /* Queue pairs is the max value minus the base queue pair */
+ ha->max_qpairs = ha->max_rsp_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
+
ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x001b,
"BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
uint16_t msix;
- int cpus;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
+ ha->msix_count = msix + 1;
+ /*
+ * By default, driver uses at least two msix vectors
+ * (default & rspq)
+ */
+ if (ql2xmqsupport) {
+ /* MB interrupt uses 1 vector */
+ ha->max_req_queues = ha->msix_count - 1;
+ ha->max_rsp_queues = ha->max_req_queues;
+
+ /* ATIOQ needs 1 vector. That's 1 less QPair */
+ if (QLA_TGT_MODE_ENABLED())
+ ha->max_req_queues--;
+
+ /* Queue pairs is the max value minus
+ * the base queue pair */
+ ha->max_qpairs = ha->max_req_queues - 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ "Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
- "MSI-X vector count: %d.\n", msix);
+ "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e,
"BAR 1 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ if (QLA_TGT_MODE_ENABLED())
+ ha->msix_count++;
qlt_83xx_iospace_config(ha);
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
.write_optrom = qla82xx_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
.write_optrom = qla8044_write_optrom_data,
.get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qla8044_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
+ .start_scsi_mq = NULL,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
+ .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ int i;
+
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X irq %d iobase 0x%p.\n",
pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
+ mutex_init(&ha->mq_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
-que_init:
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
+ goto probe_init_failed;
+
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
goto probe_init_failed;
}
- qlt_probe_one_stage1(base_vha, ha);
+ if (ha->mqenable && shost_use_blk_mq(host)) {
+ /* number of hardware queues supported by blk/scsi-mq*/
+ host->nr_hw_queues = ha->max_qpairs;
- /* Set up the irqs */
- ret = qla2x00_request_irqs(ha, rsp);
- if (ret)
- goto probe_init_failed;
+ ql_dbg(ql_dbg_init, base_vha, 0x0192,
+ "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+ } else
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+
+ qlt_probe_one_stage1(base_vha, ha);
pci_save_state(pdev);
@@ -2842,11 +3013,12 @@ que_init:
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable) {
- if (qla25xx_setup_mode(base_vha)) {
- ql_log(ql_log_warn, base_vha, 0x00ec,
- "Failed to create queues, falling back to single queue mode.\n");
- goto que_init;
+ if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ /* Create start of day qpairs for Block MQ */
+ if (shost_use_blk_mq(host)) {
+ for (i = 0; i < ha->max_qpairs; i++)
+ qla2xxx_create_qpair(base_vha, 5, 0);
}
}
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
- /* Flush the work queue and remove it */
- if (ha->wq) {
- flush_workqueue(ha->wq);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
- }
-
/* Cancel all work and destroy DPC workqueues */
if (ha->dpc_lp_wq) {
cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
}
+ qla2x00_free_fcports(vha);
+
qla2x00_free_irqs(vha);
- qla2x00_free_fcports(vha);
+ /* Flush the work queue and remove it */
+ if (ha->wq) {
+ flush_workqueue(ha->wq);
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
+ }
+
qla2x00_mem_free(ha);
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
+ INIT_LIST_HEAD(&vha->qp_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
base_vha->flags.init_done = 0;
qla25xx_delete_queues(base_vha);
- qla2x00_free_irqs(base_vha);
qla2x00_free_fcports(base_vha);
+ qla2x00_free_irqs(base_vha);
qla2x00_mem_free(ha);
qla82xx_md_free(base_vha);
qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
{
scsi_qla_host_t *base_vha;
struct qla_hw_data *ha;
+ uint32_t online;
+ struct qla_qpair *qpair;
ha = (struct qla_hw_data *)data;
base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
ha->isp_ops->beacon_blink(base_vha);
}
+ /* qpair online check */
+ if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+ &base_vha->dpc_flags)) {
+ if (ha->flags.eeh_busy ||
+ ha->flags.pci_channel_io_perm_failure)
+ online = 0;
+ else
+ online = 1;
+
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list,
+ qp_list_elem)
+ qpair->online = online;
+ mutex_unlock(&ha->mq_lock);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ if (ql2xmqsupport) {
+ set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
qla83xx_wr_reg(vha, reg, data);
}
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+ return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
static const struct pci_error_handlers qla2xxx_err_handler = {
.error_detected = qla2xxx_pci_error_detected,
.mmio_enabled = qla2xxx_pci_mmio_enabled,
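qla2xxx_map_queues() above wires the host template's .map_queues hook to blk_mq_pci_map_queues(), so blk-mq spreads hardware queues according to the PCI device's MSI-X affinity masks. A sketch of the same hook for a generic PCI LLD; my_host_to_pdev is an assumed accessor.

#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

static struct pci_dev *my_host_to_pdev(struct Scsi_Host *shost); /* assumed */

static int my_map_queues(struct Scsi_Host *shost)
{
        /* Reuse the device's per-vector affinity masks for the hw queues. */
        return blk_mq_pci_map_queues(&shost->tag_set, my_host_to_pdev(shost));
}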
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 9f6012b78e56..b4336e0cd85f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* NVRAM support routines
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cf04a364fd8b..03051e12a072 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4085,7 +4085,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
jiffies_to_timespec(delta_jiff, &ts);
kt = ktime_set(ts.tv_sec, ts.tv_nsec);
} else
- kt = ktime_set(0, sdebug_ndelay);
+ kt = sdebug_ndelay;
if (NULL == sd_dp) {
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (NULL == sd_dp)
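The scsi_debug change above relies on ktime_t being a plain signed 64-bit nanosecond count in this tree, so a delay already expressed in nanoseconds can be assigned directly instead of going through ktime_set(0, ns). A trivial illustration with an assumed helper name:

#include <linux/ktime.h>

static ktime_t my_ndelay_to_ktime(unsigned long ndelay)
{
        return ndelay;          /* same value ktime_set(0, ndelay) would produce */
}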
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index c4f7b56fa6f6..8b8c814df5c7 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -12,7 +12,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c35b6de4ca64..e9e1e141af9c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- sdb->length = blk_rq_bytes(req);
+ sdb->length = blk_rq_payload_bytes(req);
return BLKPREP_OK;
}
@@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
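The scsi_internal_device_block() hunk above switches from blk_mq_stop_hw_queues() to blk_mq_quiesce_queue(), which also waits for .queue_rq calls already in flight. A sketch of the blocking helper with both paths; my_block_queue is a placeholder.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void my_block_queue(struct request_queue *q)
{
        unsigned long flags;

        if (q->mq_ops) {
                blk_mq_quiesce_queue(q);        /* drains in-flight dispatches too */
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);              /* legacy path, queue_lock held */
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}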
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 7a74b82e8973..480a597b3877 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -26,7 +26,7 @@
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 07349270535d..82dfe07b1d47 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1622e23138e0..1fbb1ecf49f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -53,7 +53,7 @@
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -836,7 +836,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
struct bio *bio = rq->bio;
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
int ret;
if (sdkp->device->no_write_same)
@@ -869,21 +868,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdp->sector_size;
cmd->allowed = SD_MAX_RETRIES;
-
- /*
- * For WRITE_SAME the data transferred in the DATA IN buffer is
- * different from the amount of data actually written to the target.
- *
- * We set up __data_len to the amount of data transferred from the
- * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
- * to transfer a single sector of data first, but then reset it to
- * the amount of data to be written right after so that the I/O path
- * knows how much to actually write.
- */
- rq->__data_len = sdp->sector_size;
- ret = scsi_init_io(cmd);
- rq->__data_len = nr_bytes;
- return ret;
+ return scsi_init_io(cmd);
}
static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 070332eb41f3..dbe5b4b95df0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -581,6 +581,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 396b32dca074..7cf70aaec0ba 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_res;
}
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_dflt_sgl_pool;
}
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+ ret = -ENOMEM;
goto err_free_max_sgl_pool;
}
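The three snic_probe() hunks above all fix the same class of bug: jumping to the error exit without setting the return code, so the caller saw success. A minimal sketch of the intended shape, with placeholder names:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int my_create_pool(mempool_t **pool, struct kmem_cache *cache)
{
        int ret = 0;

        *pool = mempool_create_slab_pool(2, cache);
        if (!*pool) {
                ret = -ENOMEM;          /* without this, 0 would be returned */
                goto err;
        }
        return 0;
err:
        return ret;
}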
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index bed2bbd6b923..94352e4df831 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -46,7 +46,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 03054c0e7689..dfffdf63e44c 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 605887d5ee57..5f35b863e1a7 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -41,7 +41,7 @@ static const char *verstr = "20160209";
#include <linux/delay.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aa43bfea0d00..abe617372661 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -23,6 +23,7 @@
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -1031,6 +1032,34 @@ out:
return ret;
}
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ return err;
+}
+
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
*/
host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
+ if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * UFS driver might be probed before the phy driver does.
+ * In that case we would like to return EPROBE_DEFER code.
+ */
+ err = -EPROBE_DEFER;
+ dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
+ __func__, err);
+ goto out_variant_clear;
+ } else if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
- ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
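The ufs_qcom_init() hunk above special-cases -EPROBE_DEFER from devm_phy_get() so the driver probe is retried once the PHY provider has registered. A hedged sketch of that pattern with placeholder names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/phy/phy.h>

static int my_get_phy(struct device *dev, struct phy **out)
{
        struct phy *phy = devm_phy_get(dev, "ufsphy");

        if (phy == ERR_PTR(-EPROBE_DEFER)) {
                dev_warn(dev, "phy not probed yet, deferring\n");
                return -EPROBE_DEFER;   /* driver core retries the probe later */
        }
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        *out = phy;
        return 0;
}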
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a57ce2..fe517cd7dac3 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7983058f3f7..08b799d4efcc 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10us), but this is not enough
+ * for some vendors.
+ * Gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
+ * Gear switch can be issued by the host controller as an error recovery, and
+ * any software delay will not help in this case, so we need to increase
+ * PA_SaveConfigTime to >32us as per vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_NO_FASTAUTO),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
- END_FIX
-};
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ef8548c3a423..20e5e5fb048c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+ END_FIX
+};
+
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == UFSHCI_VERSION_10)
- return INTERRUPT_MASK_ALL_VER_10;
- else
- return INTERRUPT_MASK_ALL_VER_11;
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ }
+
+ return intr_mask;
}
/**
@@ -892,7 +930,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
- scaling->busy_start_t = ktime_set(0, 0);
+ scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
}
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
}
/**
@@ -6621,7 +6661,7 @@ start_window:
scaling->busy_start_t = ktime_get();
scaling->is_busy_started = true;
} else {
- scaling->busy_start_t = ktime_set(0, 0);
+ scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7d9ff22acfea..08cd26ed2382 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
* @setup_task_mgmt: called before any task management request is issued
* to set some things
* @hibern8_notify: called around hibern8 enter/exit
- * to configure some things
+ * @apply_dev_quirks: called to apply device specific quirks
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
void (*setup_xfer_req)(struct ufs_hba *, int, bool);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
+ enum ufs_notify_change_status);
+ int (*apply_dev_quirks)(struct ufs_hba *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
return hba->vops->hibern8_notify(hba, cmd, status);
}
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->apply_dev_quirks)
+ return hba->vops->apply_dev_quirks(hba);
+ return 0;
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
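ufshcd_vops_apply_dev_quirks() above follows the existing wrapper convention: a variant callback is optional, so the helper checks both the ops table and the hook before dispatching and otherwise reports success. A generic sketch with illustrative struct names:

struct my_ops {
        int (*apply_quirks)(void *ctx);
};

struct my_host {
        const struct my_ops *ops;
};

static inline int my_apply_quirks(struct my_host *h, void *ctx)
{
        /* A missing table or missing callback simply means "nothing to do". */
        if (h->ops && h->ops->apply_quirks)
                return h->ops->apply_quirks(ctx);
        return 0;
}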
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 5d978867be57..8c5190e2e1c9 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,10 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
/* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
/* Interrupt disable mask for UFSHCI v1.1 */
INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
/*
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fdbb1a9..a3d6d7cfa929 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr_ptr2idx(rcr->cursor))
- pr_crit("losing uncommited RCR entries\n");
+ pr_crit("losing uncommitted RCR entries\n");
i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr->ci)
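The bm_in()/bm_out()/bm_ce_in() changes above make the byte swap explicit: the portal registers are big-endian and __raw_readl()/__raw_writel() perform no swapping. The accessor shape, reduced to a sketch (base is an assumed ioremapped pointer):

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static inline u32 my_reg_read(void __iomem *base, u32 offset)
{
        return be32_to_cpu(__raw_readl(base + offset));
}

static inline void my_reg_write(void __iomem *base, u32 offset, u32 val)
{
        __raw_writel(cpu_to_be32(val), base + offset);
}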
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb0524543f..a8e8389a6894 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
node->full_name);
return -ENXIO;
}
- bm_ccsr_start = devm_ioremap(dev, res->start,
- res->end - res->start + 1);
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
if (!bm_ccsr_start)
return -ENXIO;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 986f64690e6e..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -126,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -150,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_unlock(&bman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -159,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
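Note: the probe rework above moves the error message to each failure site and adds an err_portal_init label so a failed init_pcfg() unwinds both mappings. This is the usual kernel unwind ladder, where each label undoes exactly what succeeded before the jump; a compact sketch with illustrative names (finish_init() stands in for init_pcfg()):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical second-stage init; stands in for init_pcfg() here. */
static int finish_init(void __iomem *ce, void __iomem *ci)
{
        return 0;
}

static int setup_two_regions(phys_addr_t ce_phys, size_t ce_len,
                             phys_addr_t ci_phys, size_t ci_len)
{
        void __iomem *ce, *ci;

        ce = ioremap(ce_phys, ce_len);
        if (!ce)
                goto err_map_ce;

        ci = ioremap(ci_phys, ci_len);
        if (!ci)
                goto err_map_ci;

        if (finish_init(ce, ci))
                goto err_init;

        return 0;

err_init:               /* undo only what succeeded */
        iounmap(ci);
err_map_ci:
        iounmap(ce);
err_map_ce:
        return -ENXIO;
}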
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72295c6..2eaf3184f61d 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/prefetch.h>
#include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..6f509f68085e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca;
- u16 seqnum;
- u32 orp; /* 24-bit */
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
} __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
};
/* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
u8 _ncw_verb;
u8 __reserved1[3];
- u32 fqid; /* 24-bit */
+ __be32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2;
- u8 count; /* number of consecutive FQID */
- u8 __reserved3[10];
- u32 context_b; /* frame queue context b */
- u8 __reserved4[40];
-} __packed;
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
u8 _ncw_verb;
u8 __reserved1[30];
u8 cgid;
u8 __reserved2[32];
};
-struct qm_mcc_querywq {
- u8 _ncw_verb;
- u8 __reserved;
- /* select channel if verb != QUERYWQ_DEDICATED */
- u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
- u8 __reserved2[60];
-} __packed;
-
#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
u8 __reserved[63];
};
struct qm_mcc_initfq initfq;
- struct qm_mcc_queryfq queryfq;
- struct qm_mcc_alterfq alterfq;
struct qm_mcc_initcgr initcgr;
- struct qm_mcc_querycgr querycgr;
- struct qm_mcc_querywq querywq;
- struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
};
/* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
/* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor))
- pr_crit("losing uncommited EQCR entries\n");
+ pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
DPAA_ASSERT(eqcr->busy);
- DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
- DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);
}
@@ -962,8 +939,6 @@ struct qman_portal {
u32 sdqcr;
/* probing time config params for cpu-affine portals */
const struct qm_portal_config *config;
- /* needed for providing a non-NULL device to dma_map_***() */
- struct platform_device *pdev;
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
struct qman_cgrs *cgrs;
/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
const struct qman_cgrs *cgrs)
{
struct qm_portal *p;
- char buf[16];
int ret;
u32 isdr;
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
- sprintf(buf, "qportal-%d", c->channel);
- portal->pdev = platform_device_alloc(buf, -1);
- if (!portal->pdev)
- goto fail_devalloc;
- if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
- goto fail_devadd;
- ret = platform_device_add(portal->pdev);
- if (ret)
- goto fail_devadd;
isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
/* special handling, drain just in case it's a few FQRNIs */
const union qm_mr_entry *e = qm_mr_current(p);
- dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
- e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
goto fail_dqrr_mr_empty;
}
/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
fail_affinity:
free_irq(c->irq, portal);
fail_irq:
- platform_device_del(portal->pdev);
-fail_devadd:
- platform_device_put(portal->pdev);
-fail_devalloc:
kfree(portal->cgrs);
fail_cgrs:
qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p);
- platform_device_del(qm->pdev);
- platform_device_put(qm->pdev);
-
qm->config = NULL;
}
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL:
/* Lookup in the retirement table */
- fq = fqid_to_fq(msg->fq.fqid);
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
if (WARN_ON(!fq))
break;
fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
break;
case QM_MR_VERB_FQPN:
/* Parked */
- fq = tag_to_fq(msg->fq.contextB);
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
}
} else {
/* Its a software ERN */
- fq = tag_to_fq(msg->ern.tag);
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg);
}
num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/*
- * VDQCR: don't trust contextB as the FQ may have
+ * VDQCR: don't trust context_b as the FQ may have
* been configured for h/w consumption and we're
* draining it post-retirement.
*/
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq);
} else {
- /* SDQCR: contextB points to the FQ */
- fq = tag_to_fq(dq->contextB);
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL;
#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL;
}
/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc = qm_mc_start(&p->p);
if (opts)
mcc->initfq = *opts;
- mcc->initfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0;
/*
- * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
* demux pointer. Otherwise, the caller-provided value is allowed to
* stand, don't overwrite it.
*/
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
- mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/*
* and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings.
*/
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
- phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
- DMA_TO_DEVICE);
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4;
}
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
goto out;
}
if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
}
- if (opts->we_mask & QM_INITFQ_WE_CGID)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
- msg.fq.fqid = fq->fqid;
- msg.fq.contextB = fq_to_tag(fq);
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg);
}
} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->querycgr.cgid = cgr->cgrid;
+ mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
if (unlikely(!eq))
goto out;
- eq->fqid = fq->fqid;
- eq->tag = fq_to_tag(fq);
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ out:
}
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
static u8 qman_cgr_cpus[CGR_NUM];
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts)
{
struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts = {};
int ret;
struct qman_portal *p;
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
spin_lock(&p->cgr_lock);
if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
ret = qman_query_cgr(cgr, &cgr_state);
if (ret)
goto out;
- if (opts)
- local_opts = *opts;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
- else
- /* Overwrite TARG */
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
- TARG_MASK(p);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
/* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
&local_opts);
else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
goto release_lock;
}
- /* Overwrite TARG */
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
- else
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
- ~(TARG_MASK(p));
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret)
/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
} while (wait && !dqrr);
while (dqrr) {
- if (dqrr->fqid == fqid && (dqrr->stat & s))
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
found = 1;
qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
dev = p->config->dev;
/* Determine the state of the FQID */
mcc = qm_mc_start(&p->p);
- mcc->queryfq_np.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
/* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
cpu_relax();
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
{
return portal->config;
}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
+ return err;
if (qm_fqd_get_chan(&fqd) == qp) {
/* The channel is the FQ's target, clean it */
err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
* error, looking for non-OOS FQDs whose CGR is the CGR being released
*/
struct qman_fq fq = {
- .fqid = 1
+ .fqid = QM_FQID_RANGE_START
};
int err;
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid);
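Note: with the per-portal dummy platform device gone, qman.c maps DMA buffers against the real portal device (qman_dma_portal->config->dev) and now checks dma_mapping_error() after dma_map_single(), which the old code skipped. The general pattern, with illustrative names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a CPU buffer for device reads and verify the mapping. */
static int map_for_device(struct device *dev, void *buf, size_t len,
                          dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr)) {
                dev_err(dev, "dma_map_single() failed\n");
                return -EIO;
        }

        *out = addr;
        return 0;
}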
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e0077e..f4e6e70de259 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
/* map as cacheable, non-guarded */
void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ if (!tmpp)
+ return -ENOMEM;
+
memset_io(tmpp, 0, sz);
flush_dcache_range((unsigned long)tmpp,
(unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index d068e4820f49..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
#include "qman_priv.h"
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
/* Enable portal interupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
/* all assigned portals are initialized now */
qman_init_cgr_all();
}
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
spin_unlock(&qman_lock);
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -217,9 +224,9 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- const u32 *channel;
void __iomem *va;
- int irq, len, cpu;
+ int irq, cpu, err;
+ u32 val;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
@@ -243,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
return -ENXIO;
}
- channel = of_get_property(node, "cell-index", &len);
- if (!channel || (len != 4)) {
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name);
- return -ENXIO;
+ return err;
}
- pcfg->channel = *channel;
+ pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -259,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -285,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_unlock(&qman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -294,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
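Note: switching from of_get_property() plus a manual length check to of_property_read_u32() shortens the probe and lets the helper handle the device tree's big-endian encoding of the value. A minimal usage sketch (the property name is the one from the patch; the surrounding function is illustrative):

#include <linux/device.h>
#include <linux/of.h>

static int read_cell_index(struct device *dev, struct device_node *np,
                           u32 *channel)
{
        int err = of_property_read_u32(np, "cell-index", channel);

        if (err)
                dev_err(dev, "missing or bad 'cell-index' property (%d)\n", err);
        return err;
}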
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e623a9..53685b59718e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- u32 cscn_targ_swp[4];
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
} __packed;
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
/* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
struct qm_mcr_queryfq_np {
u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff17f45e..2895d062cf51 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
{
qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
qm_fd_set_contig_big(fd, 0x0000ffff);
- fd->cmd = 0xfeedf00d;
+ fd->cmd = cpu_to_be32(0xfeedf00d);
}
static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd++;
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
- int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
- if (!r) {
- enum qm_fd_format fmt_a, fmt_b;
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
- fmt_a = qm_fd_get_format(a);
- fmt_b = qm_fd_get_format(b);
- r = fmt_a - fmt_b;
- }
- if (!r)
- r = a->cfg - b->cfg;
- if (!r)
- r = a->cmd - b->cmd;
- return r;
+ return neq;
}
/* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
- if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
return qman_cb_dqrr_consume;
}
fd_inc(&fd_dq);
- if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66ba42f5..e87b65403b67 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -191,6 +191,9 @@ static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
- if (!pdev) {
- pr_crit("platform_device_alloc() failed");
- return -EIO;
- }
- if (platform_device_add(pdev)) {
- pr_crit("platform_device_add() failed");
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
return -EIO;
}
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
return 0;
}
static void deallocate_frame_data(void)
{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
kfree(__frame_ptr);
}
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
int loop;
if (qm_fd_addr_get64(fd) != handler->addr) {
- pr_crit("bad frame address");
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
return -EIO;
}
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
goto failed;
}
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a827261b..ade168f5328e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
.resume = qe_resume,
};
-static int __init qe_drv_init(void)
-{
- return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
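Note: builtin_platform_driver() generates essentially the boilerplate being deleted here: an initcall that registers the driver, for code that can never be built as a module. Roughly what the macro expands to for this driver (spelled out for reference, not a proposed change):

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_driver qe_driver = {
        /* .probe, .resume, .driver as in qe.c */
};

/* Roughly what builtin_platform_driver(qe_driver) expands to: */
static int __init qe_driver_init(void)
{
        return platform_driver_register(&qe_driver);
}
device_initcall(qe_driver_init);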
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0a4ea809a61b..609bb3424c14 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,7 +23,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default ARM64 && ARCH_MEDIATEK
+ default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 837effe19907..beb79162369a 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -11,17 +11,16 @@
* GNU General Public License for more details.
*/
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
-#include <linux/regmap.h>
-#include <linux/soc/mediatek/infracfg.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
#include <dt-bindings/power/mt8173-power.h>
#define SPM_VDE_PWR_CON 0x0210
@@ -29,11 +28,17 @@
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
-#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_AUDIO_PWR_CON 0x029c /* MT8173 */
+#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
+#define SPM_ETH_PWR_CON 0x02a0
+#define SPM_HIF_PWR_CON 0x02a4
+#define SPM_IFR_MSC_PWR_CON 0x02a8
#define SPM_MFG_2D_PWR_CON 0x02c0
#define SPM_MFG_ASYNC_PWR_CON 0x02c4
#define SPM_USB_PWR_CON 0x02cc
+
#define SPM_PWR_STATUS 0x060c
#define SPM_PWR_STATUS_2ND 0x0610
@@ -43,10 +48,15 @@
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_STATUS_CONN BIT(1)
#define PWR_STATUS_DISP BIT(3)
#define PWR_STATUS_MFG BIT(4)
#define PWR_STATUS_ISP BIT(5)
#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_BDP BIT(14)
+#define PWR_STATUS_ETH BIT(15)
+#define PWR_STATUS_HIF BIT(16)
+#define PWR_STATUS_IFR_MSC BIT(17)
#define PWR_STATUS_VENC_LT BIT(20)
#define PWR_STATUS_VENC BIT(21)
#define PWR_STATUS_MFG_2D BIT(22)
@@ -55,12 +65,23 @@
#define PWR_STATUS_USB BIT(25)
enum clk_id {
- MT8173_CLK_NONE,
- MT8173_CLK_MM,
- MT8173_CLK_MFG,
- MT8173_CLK_VENC,
- MT8173_CLK_VENC_LT,
- MT8173_CLK_MAX,
+ CLK_NONE,
+ CLK_MM,
+ CLK_MFG,
+ CLK_VENC,
+ CLK_VENC_LT,
+ CLK_ETHIF,
+ CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+ NULL,
+ "mm",
+ "mfg",
+ "venc",
+ "venc_lt",
+ "ethif",
+ NULL,
};
#define MAX_CLKS 2
@@ -76,98 +97,6 @@ struct scp_domain_data {
bool active_wakeup;
};
-static const struct scp_domain_data scp_domain_data[] = {
- [MT8173_POWER_DOMAIN_VDEC] = {
- .name = "vdec",
- .sta_mask = PWR_STATUS_VDEC,
- .ctl_offs = SPM_VDE_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_VENC] = {
- .name = "venc",
- .sta_mask = PWR_STATUS_VENC,
- .ctl_offs = SPM_VEN_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
- },
- [MT8173_POWER_DOMAIN_ISP] = {
- .name = "isp",
- .sta_mask = PWR_STATUS_ISP,
- .ctl_offs = SPM_ISP_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_MM] = {
- .name = "mm",
- .sta_mask = PWR_STATUS_DISP,
- .ctl_offs = SPM_DIS_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
- MT8173_TOP_AXI_PROT_EN_MM_M1,
- },
- [MT8173_POWER_DOMAIN_VENC_LT] = {
- .name = "venc_lt",
- .sta_mask = PWR_STATUS_VENC_LT,
- .ctl_offs = SPM_VEN2_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
- },
- [MT8173_POWER_DOMAIN_AUDIO] = {
- .name = "audio",
- .sta_mask = PWR_STATUS_AUDIO,
- .ctl_offs = SPM_AUDIO_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_USB] = {
- .name = "usb",
- .sta_mask = PWR_STATUS_USB,
- .ctl_offs = SPM_USB_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- .active_wakeup = true,
- },
- [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
- .name = "mfg_async",
- .sta_mask = PWR_STATUS_MFG_ASYNC,
- .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = 0,
- .clk_id = {MT8173_CLK_MFG},
- },
- [MT8173_POWER_DOMAIN_MFG_2D] = {
- .name = "mfg_2d",
- .sta_mask = PWR_STATUS_MFG_2D,
- .ctl_offs = SPM_MFG_2D_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_MFG] = {
- .name = "mfg",
- .sta_mask = PWR_STATUS_MFG,
- .ctl_offs = SPM_MFG_PWR_CON,
- .sram_pdn_bits = GENMASK(13, 8),
- .sram_pdn_ack_bits = GENMASK(21, 16),
- .clk_id = {MT8173_CLK_NONE},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
- MT8173_TOP_AXI_PROT_EN_MFG_M0 |
- MT8173_TOP_AXI_PROT_EN_MFG_M1 |
- MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
- },
-};
-
-#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
-
struct scp;
struct scp_domain {
@@ -179,7 +108,7 @@ struct scp_domain {
};
struct scp {
- struct scp_domain domains[NUM_DOMAINS];
+ struct scp_domain *domains;
struct genpd_onecell_data pd_data;
struct device *dev;
void __iomem *base;
@@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev)
return scpd->data->active_wakeup;
}
-static int scpsys_probe(struct platform_device *pdev)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+ int i;
+
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+ const struct scp_domain_data *scp_domain_data, int num)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
- struct clk *clk[MT8173_CLK_MAX];
+ struct clk *clk[CLK_MAX];
scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
if (!scp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
scp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(scp->base))
- return PTR_ERR(scp->base);
+ return ERR_CAST(scp->base);
+
+ scp->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*scp->domains) * num, GFP_KERNEL);
+ if (!scp->domains)
+ return ERR_PTR(-ENOMEM);
pd_data = &scp->pd_data;
pd_data->domains = devm_kzalloc(&pdev->dev,
- sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ sizeof(*pd_data->domains) * num, GFP_KERNEL);
if (!pd_data->domains)
- return -ENOMEM;
-
- clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
- if (IS_ERR(clk[MT8173_CLK_MM]))
- return PTR_ERR(clk[MT8173_CLK_MM]);
-
- clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
- if (IS_ERR(clk[MT8173_CLK_MFG]))
- return PTR_ERR(clk[MT8173_CLK_MFG]);
-
- clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
- if (IS_ERR(clk[MT8173_CLK_VENC]))
- return PTR_ERR(clk[MT8173_CLK_VENC]);
-
- clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
- if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
- return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+ return ERR_PTR(-ENOMEM);
scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"infracfg");
if (IS_ERR(scp->infracfg)) {
dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
PTR_ERR(scp->infracfg));
- return PTR_ERR(scp->infracfg);
+ return ERR_CAST(scp->infracfg);
}
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev)
if (PTR_ERR(scpd->supply) == -ENODEV)
scpd->supply = NULL;
else
- return PTR_ERR(scpd->supply);
+ return ERR_CAST(scpd->supply);
}
}
- pd_data->num_domains = NUM_DOMAINS;
+ pd_data->num_domains = num;
+
+ init_clks(pdev, clk);
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev)
scpd->scp = scp;
scpd->data = data;
- for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
- scpd->clk[j] = clk[data->clk_id[j]];
+
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+ struct clk *c = clk[data->clk_id[j]];
+
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "%s: clk unavailable\n",
+ data->name);
+ return ERR_CAST(c);
+ }
+
+ scpd->clk[j] = c;
+ }
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ }
+
+ return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+ struct scp *scp, int num)
+{
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
/*
* Initially turn on all domains to make the domains usable
@@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev)
* valid.
*/
+ pd_data = &scp->pd_data;
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+ [MT2701_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = 0x0104,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = 0x0002,
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MFG},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_BDP] = {
+ .name = "bdp",
+ .sta_mask = PWR_STATUS_BDP,
+ .ctl_offs = SPM_BDP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+};
+
+#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701)
+
+static int __init scpsys_probe_mt2701(struct platform_device *pdev)
+{
+ struct scp *scp;
+
+ scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701);
+
+ return 0;
+}
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173)
+
+static int __init scpsys_probe_mt8173(struct platform_device *pdev)
+{
+ struct scp *scp;
+ struct genpd_onecell_data *pd_data;
+ int ret;
+
+ scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173);
+
+ pd_data = &scp->pd_data;
+
ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
if (ret && IS_ENABLED(CONFIG_PM))
@@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev)
if (ret && IS_ENABLED(CONFIG_PM))
dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
- ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
- if (ret)
- dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
-
return 0;
}
+/*
+ * scpsys driver init
+ */
+
static const struct of_device_id of_scpsys_match_tbl[] = {
{
+ .compatible = "mediatek,mt2701-scpsys",
+ .data = scpsys_probe_mt2701,
+ }, {
.compatible = "mediatek,mt8173-scpsys",
+ .data = scpsys_probe_mt8173,
}, {
/* sentinel */
}
};
+static int scpsys_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *);
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ probe = of_id->data;
+
+ return probe(pdev);
+}
+
static struct platform_driver scpsys_drv = {
.probe = scpsys_probe,
.driver = {
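Note: the scpsys rework keys each of_device_id entry to a per-SoC probe function through its .data pointer and has the common probe dispatch via of_match_node(), so supporting a new SoC means adding a table entry plus its own init path. A generic sketch of that dispatch pattern (names illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int probe_variant_a(struct platform_device *pdev) { return 0; }
static int probe_variant_b(struct platform_device *pdev) { return 0; }

static const struct of_device_id my_match_tbl[] = {
        { .compatible = "vendor,soc-a", .data = probe_variant_a },
        { .compatible = "vendor,soc-b", .data = probe_variant_b },
        { /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
        int (*probe)(struct platform_device *);
        const struct of_device_id *of_id;

        of_id = of_match_node(my_match_tbl, pdev->dev.of_node);
        if (!of_id || !of_id->data)
                return -EINVAL;

        probe = of_id->data;
        return probe(pdev);
}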
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 86cc78cd1962..d9115cb5ed9d 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,8 +1,12 @@
+obj-$(CONFIG_SOC_BUS) += renesas-soc.o
+
obj-$(CONFIG_ARCH_RCAR_GEN1) += rcar-rst.o
obj-$(CONFIG_ARCH_RCAR_GEN2) += rcar-rst.o
obj-$(CONFIG_ARCH_R8A7795) += rcar-rst.o
obj-$(CONFIG_ARCH_R8A7796) += rcar-rst.o
+obj-$(CONFIG_ARCH_R8A7743) += rcar-sysc.o r8a7743-sysc.o
+obj-$(CONFIG_ARCH_R8A7745) += rcar-sysc.o r8a7745-sysc.o
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 000000000000..9583a327d90c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation; of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+ .areas = r8a7743_areas,
+ .num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 000000000000..d17887c08aa1
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation; of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+ .areas = r8a7745_areas,
+ .num_areas = ARRAY_SIZE(r8a7745_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 65c8e1eb90c0..225c35c79d9a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -275,6 +275,12 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 77dbe861473f..f6e842e2976e 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -50,6 +50,8 @@ struct rcar_sysc_info {
unsigned int num_areas;
};
+extern const struct rcar_sysc_info r8a7743_sysc_info;
+extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 000000000000..330960312296
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,257 @@
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+ const char name[16];
+ u32 reg; /* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+ .name = "R-Car Gen1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+ .name = "R-Car Gen2",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+ .name = "R-Car Gen3",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+ .name = "R-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza __initconst __maybe_unused = {
+ .name = "RZ/A",
+};
+
+static const struct renesas_family fam_rzg __initconst __maybe_unused = {
+ .name = "RZ/G",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+ .name = "SH-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+ const struct renesas_family *family;
+ u8 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+ .family = &fam_rza,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x40,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+ .family = &fam_shmobile,
+ .id = 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst = {
+#ifdef CONFIG_ARCH_R7S72100
+ { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+ { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+ { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+ { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+ { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+ { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
+#endif
+ { /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct renesas_family *family;
+ const struct of_device_id *match;
+ const struct renesas_soc *soc;
+ void __iomem *chipid = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ unsigned int product;
+
+ match = of_match_node(renesas_socs, of_root);
+ if (!match)
+ return -ENODEV;
+
+ soc = match->data;
+ family = soc->family;
+
+ /* Try PRR first, then hardcoded fallback */
+ np = of_find_compatible_node(NULL, NULL, "renesas,prr");
+ if (np) {
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+ } else if (soc->id) {
+ chipid = ioremap(family->reg, 4);
+ }
+ if (chipid) {
+ product = readl(chipid);
+ iounmap(chipid);
+ if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n", product);
+ return -ENODEV;
+ }
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
+ GFP_KERNEL);
+ if (chipid)
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ ((product >> 4) & 0x0f) + 1,
+ product & 0xf);
+
+ pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ return 0;
+}
+core_initcall(renesas_soc_init);
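Note: renesas-soc.c identifies the SoC from the root compatible (and the PRR/CCCR product register when present) and publishes it on the SoC bus. The registration step reduces to filling a soc_device_attribute and calling soc_device_register(); a trimmed sketch of just that part (strings are placeholders, not real Renesas identifiers):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static int register_soc_info(void)
{
        struct soc_device_attribute *attr;
        struct soc_device *soc_dev;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->family = "Example Family";        /* placeholder */
        attr->soc_id = "example-soc";           /* placeholder */

        soc_dev = soc_device_register(attr);
        if (IS_ERR(soc_dev)) {
                kfree(attr);
                return PTR_ERR(soc_dev);
        }
        return 0;
}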
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 7acd1517dd37..1c78c42416c6 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
@@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
return (val & pd_info->idle_mask) == pd_info->idle_mask;
}
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ return val;
+}
+
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
const struct rockchip_domain_info *pd_info = pd->info;
+ struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int target_ack;
unsigned int val;
+ bool is_idle;
+ int ret;
if (pd_info->req_mask == 0)
return 0;
@@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
dsb(sy);
- do {
- regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
- } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+ /* Wait until idle_ack matches the request */
+ target_ack = idle ? pd_info->ack_mask : 0;
+ ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+ (val & pd_info->ack_mask) == target_ack,
+ 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get ack on domain '%s', val=0x%x\n",
+ genpd->name, val);
+ return ret;
+ }
- while (rockchip_pmu_domain_is_idle(pd) != idle)
- cpu_relax();
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+ is_idle, is_idle == idle, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to set idle on domain '%s', val=%d\n",
+ genpd->name, is_idle);
+ return ret;
+ }
return 0;
}
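The change above replaces an unbounded busy-wait with readx_poll_timeout_atomic() from <linux/iopoll.h>. A generic, self-contained sketch of that pattern follows; the register offset and ready bit are hypothetical.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static u32 example_read_status(void __iomem *base)
{
	return readl(base + 0x04);	/* hypothetical status register */
}

static int example_wait_ready(void __iomem *base)
{
	u32 val;

	/* no delay between reads, give up after 10 ms; returns -ETIMEDOUT */
	return readx_poll_timeout_atomic(example_read_status, base, val,
					 val & BIT(0), 0, 10000);
}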
@@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
if (pd->info->pwr_mask == 0)
return;
@@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
dsb(sy);
- while (rockchip_pmu_domain_is_on(pd) != on)
- cpu_relax();
+ if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000)) {
+ dev_err(pmu->dev,
+ "failed to set domain '%s', val=%d\n",
+ genpd->name, is_on);
+ return;
+ }
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -445,7 +479,16 @@ err_out:
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
- int i;
+ int i, ret;
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->pmu->dev, "failed to remove domain '%s': %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
for (i = 0; i < pd->num_clks; i++) {
clk_unprepare(pd->clks[i]);
@@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Configure power up and down transition delays for CORE
* and GPU domains.
*/
- rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
- pmu_info->core_power_transition_time);
- rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
- pmu_info->gpu_power_transition_time);
+ if (pmu_info->core_power_transition_time)
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ if (pmu_info->gpu_pwrcnt_offset)
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
error = -ENODEV;
@@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
goto err_out;
}
- of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (error) {
+ dev_err(dev, "failed to add provider: %d\n", error);
+ goto err_out;
+ }
return 0;
@@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.idle_offset = 0x64,
.ack_offset = 0x68,
- .core_pwrcnt_offset = 0x9c,
- .gpu_pwrcnt_offset = 0xa4,
-
- .core_power_transition_time = 24,
- .gpu_power_transition_time = 24,
+ /* ARM Trusted Firmware manages power transition times */
.num_domains = ARRAY_SIZE(rk3399_pm_domains),
.domain_info = rk3399_pm_domains,
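An illustrative teardown counterpart (not taken from this patch) showing how domains initialised with pm_genpd_init() and exposed via of_genpd_add_provider_onecell() would be unwound, mirroring the pm_genpd_remove() call added to rockchip_pm_remove_one_domain() above; names are made up.

#include <linux/of.h>
#include <linux/pm_domain.h>

static void example_genpd_teardown(struct device_node *np,
				   struct genpd_onecell_data *data)
{
	unsigned int i;

	/* drop the OF provider first so no new consumers can attach */
	of_genpd_del_provider(np);

	for (i = 0; i < data->num_domains; i++) {
		if (data->domains[i])
			WARN_ON(pm_genpd_remove(data->domains[i]) < 0);
	}
}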
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 03089ad2fc65..e5e124c07066 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -77,5 +77,19 @@ config ARCH_TEGRA_210_SOC
controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
name only a few.
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ help
+ Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, an ISP for image capture processing and a BPMP for
+ power management.
+
endif
endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7792ed88d80b..e233dd5dcab3 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -45,29 +45,31 @@
#include <soc/tegra/pmc.h>
#define PMC_CNTRL 0x0
-#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
-#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */
-#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */
-#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
-#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
-#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
-#define PMC_CNTRL_MAIN_RST (1 << 4)
+#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
+#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_MAIN_RST BIT(4)
#define DPD_SAMPLE 0x020
-#define DPD_SAMPLE_ENABLE (1 << 0)
+#define DPD_SAMPLE_ENABLE BIT(0)
#define DPD_SAMPLE_DISABLE (0 << 0)
#define PWRGATE_TOGGLE 0x30
-#define PWRGATE_TOGGLE_START (1 << 8)
+#define PWRGATE_TOGGLE_START BIT(8)
#define REMOVE_CLAMPING 0x34
#define PWRGATE_STATUS 0x38
+#define PMC_PWR_DET 0x48
+
#define PMC_SCRATCH0 0x50
-#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31)
-#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30)
-#define PMC_SCRATCH0_MODE_RCM (1 << 1)
+#define PMC_SCRATCH0_MODE_RECOVERY BIT(31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define PMC_SCRATCH0_MODE_RCM BIT(1)
#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
PMC_SCRATCH0_MODE_BOOTLOADER | \
PMC_SCRATCH0_MODE_RCM)
@@ -75,11 +77,13 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_PWR_DET_VALUE 0xe4
+
#define PMC_SCRATCH41 0x140
#define PMC_SENSOR_CTRL 0x1b0
-#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2)
-#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1)
+#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
+#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
#define PMC_RST_STATUS 0x1b4
#define PMC_RST_STATUS_POR 0
@@ -90,10 +94,10 @@
#define PMC_RST_STATUS_AOTAG 5
#define IO_DPD_REQ 0x1b8
-#define IO_DPD_REQ_CODE_IDLE (0 << 30)
-#define IO_DPD_REQ_CODE_OFF (1 << 30)
-#define IO_DPD_REQ_CODE_ON (2 << 30)
-#define IO_DPD_REQ_CODE_MASK (3 << 30)
+#define IO_DPD_REQ_CODE_IDLE (0U << 30)
+#define IO_DPD_REQ_CODE_OFF (1U << 30)
+#define IO_DPD_REQ_CODE_ON (2U << 30)
+#define IO_DPD_REQ_CODE_MASK (3U << 30)
#define IO_DPD_STATUS 0x1bc
#define IO_DPD2_REQ 0x1c0
@@ -101,16 +105,16 @@
#define SEL_DPD_TIM 0x1c8
#define PMC_SCRATCH54 0x258
-#define PMC_SCRATCH54_DATA_SHIFT 8
-#define PMC_SCRATCH54_ADDR_SHIFT 0
+#define PMC_SCRATCH54_DATA_SHIFT 8
+#define PMC_SCRATCH54_ADDR_SHIFT 0
#define PMC_SCRATCH55 0x25c
-#define PMC_SCRATCH55_RESET_TEGRA (1 << 31)
-#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
-#define PMC_SCRATCH55_PINMUX_SHIFT 24
-#define PMC_SCRATCH55_16BITOP (1 << 15)
-#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
-#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+#define PMC_SCRATCH55_RESET_TEGRA BIT(31)
+#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
+#define PMC_SCRATCH55_PINMUX_SHIFT 24
+#define PMC_SCRATCH55_16BITOP BIT(15)
+#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
+#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
#define GPU_RG_CNTRL 0x2d4
@@ -124,6 +128,12 @@ struct tegra_powergate {
unsigned int num_resets;
};
+struct tegra_io_pad_soc {
+ enum tegra_io_pad id;
+ unsigned int dpd;
+ unsigned int voltage;
+};
+
struct tegra_pmc_soc {
unsigned int num_powergates;
const char *const *powergates;
@@ -132,6 +142,9 @@ struct tegra_pmc_soc {
bool has_tsense_reset;
bool has_gpu_clamps;
+
+ const struct tegra_io_pad_soc *io_pads;
+ unsigned int num_io_pads;
};
/**
@@ -238,8 +251,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return i;
}
- dev_err(pmc->dev, "powergate %s not found\n", name);
-
return -ENODEV;
}
@@ -456,13 +467,12 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
+ err);
return err;
}
@@ -470,13 +480,12 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -801,8 +810,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
- np->name, id);
+ pr_err("powergate lookup failed for %s: %d\n", np->name, id);
goto free_mem;
}
@@ -822,20 +830,22 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
- np->name, err);
+ pr_err("failed to get clocks for %s: %d\n", np->name, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- dev_err(pmc->dev, "failed to get resets for %s: %d\n",
- np->name, err);
+ pr_err("failed to get resets for %s: %d\n", np->name, err);
goto remove_clks;
}
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
- goto power_on_cleanup;
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
/*
* FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
@@ -846,25 +856,33 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
* to be unused.
*/
if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
- (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
- goto power_on_cleanup;
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
- pm_genpd_init(&pg->genpd, NULL, off);
+ err = pm_genpd_init(&pg->genpd, NULL, off);
+ if (err < 0) {
+ pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ err);
+ goto remove_resets;
+ }
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
- np->name, err);
- goto remove_resets;
+ pr_err("failed to add PM domain provider for %s: %d\n",
+ np->name, err);
+ goto remove_genpd;
}
- dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+ pr_debug("added PM domain %s\n", pg->genpd.name);
return;
-power_on_cleanup:
- if (off)
- WARN_ON(tegra_powergate_power_up(pg, true));
+remove_genpd:
+ pm_genpd_remove(&pg->genpd);
remove_resets:
while (pg->num_resets--)
@@ -908,21 +926,36 @@ static void tegra_powergate_init(struct tegra_pmc *pmc,
of_node_put(np);
}
-static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
- unsigned long *status, unsigned int *bit)
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->num_io_pads; i++)
+ if (pmc->soc->io_pads[i].id == id)
+ return &pmc->soc->io_pads[i];
+
+ return NULL;
+}
+
+static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
+ unsigned long *status, u32 *mask)
+{
+ const struct tegra_io_pad_soc *pad;
unsigned long rate, value;
- *bit = id % 32;
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ pr_err("invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
- /*
- * There are two sets of 30 bits to select IO rails, but bits 30 and
- * 31 are control bits rather than IO rail selection bits.
- */
- if (id > 63 || *bit == 30 || *bit == 31)
- return -EINVAL;
+ if (pad->dpd == UINT_MAX)
+ return -ENOTSUPP;
- if (id < 32) {
+ *mask = BIT(pad->dpd % 32);
+
+ if (pad->dpd < 32) {
*status = IO_DPD_STATUS;
*request = IO_DPD_REQ;
} else {
@@ -931,6 +964,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
}
rate = clk_get_rate(pmc->clk);
+ if (!rate) {
+ pr_err("failed to get clock rate\n");
+ return -ENODEV;
+ }
tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
@@ -942,10 +979,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
return 0;
}
-static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
- unsigned long val, unsigned long timeout)
+static int tegra_io_pad_poll(unsigned long offset, u32 mask,
+ u32 val, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -960,67 +997,164 @@ static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
return -ETIMEDOUT;
}
-static void tegra_io_rail_unprepare(void)
+static void tegra_io_pad_unprepare(void)
{
tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
-int tegra_io_rail_power_on(unsigned int id)
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err)
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
- if (err) {
- pr_info("tegra_io_rail_poll() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_poll(status, mask, 0, 250);
+ if (err < 0) {
+ pr_err("failed to enable I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
-int tegra_io_rail_power_off(unsigned int id)
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err) {
- pr_info("tegra_io_rail_prepare() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
- if (err)
- goto error;
+ err = tegra_io_pad_poll(status, mask, mask, 250);
+ if (err < 0) {
+ pr_err("failed to disable I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+ value = tegra_pmc_readl(PMC_PWR_DET);
+ value |= BIT(pad->voltage);
+ tegra_pmc_writel(value, PMC_PWR_DET);
+
+ /* update I/O voltage */
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if (voltage == TEGRA_IO_PAD_1800000UV)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ usleep_range(100, 250);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_pad_set_voltage);
+
+int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if ((value & BIT(pad->voltage)) == 0)
+ return TEGRA_IO_PAD_1800000UV;
+
+ return TEGRA_IO_PAD_3300000UV;
+}
+EXPORT_SYMBOL(tegra_io_pad_get_voltage);
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+ return tegra_io_pad_power_enable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
+int tegra_io_rail_power_off(unsigned int id)
+{
+ return tegra_io_pad_power_disable(id);
+}
EXPORT_SYMBOL(tegra_io_rail_power_off);
#ifdef CONFIG_PM_SLEEP
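A consumer-side sketch (not part of this patch) of the I/O pad API exported above, assuming the enum values and prototypes are declared in <soc/tegra/pmc.h>; the SDMMC1 pad is only an example.

#include <linux/errno.h>
#include <soc/tegra/pmc.h>

static int example_sdmmc_pad_enable_3v3(void)
{
	int err;

	err = tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
	if (err < 0)
		return err;

	/* only pads with a valid .voltage entry support voltage switching */
	err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
				       TEGRA_IO_PAD_3300000UV);
	if (err == -ENOTSUPP)
		err = 0;	/* no voltage control on this pad; not fatal here */

	return err;
}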
@@ -1454,6 +1588,39 @@ static const u8 tegra124_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra124_powergates),
.powergates = tegra124_powergates,
@@ -1461,6 +1628,8 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+ .io_pads = tegra124_io_pads,
};
static const char * const tegra210_powergates[] = {
@@ -1497,6 +1666,47 @@ static const u8 tegra210_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 },
+ { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 },
+ { .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 },
+ { .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 },
+ { .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 },
+ { .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
@@ -1504,6 +1714,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+ .io_pads = tegra210_io_pads,
};
static const struct of_device_id tegra_pmc_match[] = {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b73e3534f67b..eacad57f2977 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1228,7 +1228,7 @@ static int knav_setup_queue_range(struct knav_device *kdev,
range->num_irqs++;
- if (oirq.args_count == 3)
+ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
range->irqs[i].cpu_map =
(oirq.args[2] & 0x0000ff00) >> 8;
}
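IS_ENABLED() folds to a compile-time constant (1 for =y or =m, 0 otherwise), so on !SMP kernels the cpu_map parsing above is optimised away entirely. A minimal sketch of the same guard, with a hypothetical helper name:

#include <linux/kconfig.h>
#include <linux/types.h>

static inline unsigned int example_irq_cpu_map(u32 irq_spec_arg)
{
	if (!IS_ENABLED(CONFIG_SMP))
		return 0;	/* no affinity hint on uniprocessor kernels */

	return (irq_spec_arg & 0x0000ff00) >> 8;
}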
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 3c09e94cf827..28dfdce4beae 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
- dma_filter_fn filter = sdd->cntrlr_info->filter;
struct device *dev = &sdd->pdev->dev;
- dma_cap_mask_t mask;
if (is_polling(sdd))
return 0;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
/* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_rx, dev, "rx");
+ sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
if (!sdd->rx_dma.ch) {
dev_err(dev, "Failed to get RX DMA channel\n");
return -EBUSY;
}
spi->dma_rx = sdd->rx_dma.ch;
- sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_tx, dev, "tx");
+ sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
if (!sdd->tx_dma.ch) {
dev_err(dev, "Failed to get TX DMA channel\n");
dma_release_channel(sdd->rx_dma.ch);
@@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cur_bpw = 8;
- if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) {
- dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. Switching to poll mode\n");
- sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
- }
-
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
@@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n",
- mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
- sci->dma_rx, sci->dma_tx);
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+ mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
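The DMA setup above now uses dma_request_slave_channel(), which resolves the channel purely from the device's dmas/dma-names properties, so no capability mask or filter function is needed. A generic sketch of that acquisition pattern (helper name and error codes are illustrative):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_acquire_dma(struct device *dev, struct dma_chan **rx,
			       struct dma_chan **tx)
{
	*rx = dma_request_slave_channel(dev, "rx");	/* matches "dma-names" */
	if (!*rx)
		return -EBUSY;

	*tx = dma_request_slave_channel(dev, "tx");
	if (!*tx) {
		dma_release_channel(*rx);
		return -EBUSY;
	}

	return 0;
}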
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index d5cc3070e83f..b653451843c8 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = vm_insert_pfn(vma, vmf->address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 1c5b41ae6774..0ee291ca2c72 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1092,7 +1092,7 @@ static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
const struct gb_camera_debugfs_entry *op = file->private_data;
- struct gb_camera *gcam = file->f_inode->i_private;
+ struct gb_camera *gcam = file_inode(file)->i_private;
struct gb_camera_debugfs_buffer *buffer;
ssize_t ret;
@@ -1114,7 +1114,7 @@ static ssize_t gb_camera_debugfs_write(struct file *file,
loff_t *offset)
{
const struct gb_camera_debugfs_entry *op = file->private_data;
- struct gb_camera *gcam = file->f_inode->i_private;
+ struct gb_camera *gcam = file_inode(file)->i_private;
ssize_t ret;
char *kbuf;
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index f1d256df06d5..c1929dfa9b31 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1249,7 +1249,7 @@ static int apb_log_poll(void *data)
static ssize_t apb_log_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
ssize_t ret;
size_t copied;
char *tmp_buf;
@@ -1303,7 +1303,7 @@ static void usb_log_disable(struct es2_ap_dev *es2)
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
char tmp_buf[3];
@@ -1316,7 +1316,7 @@ static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
{
int enable;
ssize_t retval;
- struct es2_ap_dev *es2 = f->f_inode->i_private;
+ struct es2_ap_dev *es2 = file_inode(f)->i_private;
retval = kstrtoint_from_user(buf, count, 10, &enable);
if (retval)
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index 550055ec27a5..8779270cadc1 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -757,7 +757,7 @@ static int gb_svc_version_request(struct gb_operation *op)
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
@@ -780,7 +780,7 @@ static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
@@ -803,7 +803,7 @@ static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
- struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
struct gb_svc *svc = pwrmon_rails->svc;
int ret, desc;
u32 value;
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
index e586627f4bbc..29e6c1c12807 100644
--- a/drivers/staging/greybus/timesync.c
+++ b/drivers/staging/greybus/timesync.c
@@ -921,7 +921,7 @@ EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
size_t len, loff_t *offset, bool ktime)
{
- struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
+ struct gb_timesync_svc *timesync_svc = file_inode(file)->i_private;
char *buf;
ssize_t ret = 0;
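The greybus hunks above all apply the same conversion: use the file_inode() accessor instead of dereferencing file->f_inode directly. A generic debugfs read handler as a sketch (the buffer contents are illustrative only):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_read(struct file *file, char __user *buf,
				    size_t len, loff_t *ppos)
{
	/* private pointer stashed by debugfs_create_file() */
	void *priv = file_inode(file)->i_private;
	char tmp[32];
	int n;

	n = scnprintf(tmp, sizeof(tmp), "%p\n", priv);
	return simple_read_from_buffer(buf, len, ppos, tmp, n);
}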
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 6b9cf06e8df2..427e2198bb9e 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -967,48 +967,38 @@ cfs_cpt_table_create_pattern(char *pattern)
}
#ifdef CONFIG_HOTPLUG_CPU
-static int
-cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- bool warn;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- spin_lock(&cpt_data.cpt_lock);
- cpt_data.cpt_version++;
- spin_unlock(&cpt_data.cpt_lock);
- /* Fall through */
- default:
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
- CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
- cpu, action);
- break;
- }
+static enum cpuhp_state lustre_cpu_online;
- mutex_lock(&cpt_data.cpt_mutex);
- /* if all HTs in a core are offline, it may break affinity */
- cpumask_copy(cpt_data.cpt_cpumask,
- topology_sibling_cpumask(cpu));
- warn = cpumask_any_and(cpt_data.cpt_cpumask,
- cpu_online_mask) >= nr_cpu_ids;
- mutex_unlock(&cpt_data.cpt_mutex);
- CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
- cpu, action);
- }
+static void cfs_cpu_incr_cpt_version(void)
+{
+ spin_lock(&cpt_data.cpt_lock);
+ cpt_data.cpt_version++;
+ spin_unlock(&cpt_data.cpt_lock);
+}
- return NOTIFY_OK;
+static int cfs_cpu_online(unsigned int cpu)
+{
+ cfs_cpu_incr_cpt_version();
+ return 0;
}
-static struct notifier_block cfs_cpu_notifier = {
- .notifier_call = cfs_cpu_notify,
- .priority = 0
-};
+static int cfs_cpu_dead(unsigned int cpu)
+{
+ bool warn;
+
+ cfs_cpu_incr_cpt_version();
+ mutex_lock(&cpt_data.cpt_mutex);
+ /* if all HTs in a core are offline, it may break affinity */
+ cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu));
+ warn = cpumask_any_and(cpt_data.cpt_cpumask,
+ cpu_online_mask) >= nr_cpu_ids;
+ mutex_unlock(&cpt_data.cpt_mutex);
+ CDEBUG(warn ? D_WARNING : D_INFO,
+ "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
+ cpu);
+ return 0;
+}
#endif
void
@@ -1018,7 +1008,9 @@ cfs_cpu_fini(void)
cfs_cpt_table_free(cfs_cpt_table);
#ifdef CONFIG_HOTPLUG_CPU
- unregister_hotcpu_notifier(&cfs_cpu_notifier);
+ if (lustre_cpu_online > 0)
+ cpuhp_remove_state_nocalls(lustre_cpu_online);
+ cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#endif
if (cpt_data.cpt_cpumask)
LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
@@ -1027,6 +1019,8 @@ cfs_cpu_fini(void)
int
cfs_cpu_init(void)
{
+ int ret = 0;
+
LASSERT(!cfs_cpt_table);
memset(&cpt_data, 0, sizeof(cpt_data));
@@ -1041,8 +1035,19 @@ cfs_cpu_init(void)
mutex_init(&cpt_data.cpt_mutex);
#ifdef CONFIG_HOTPLUG_CPU
- register_hotcpu_notifier(&cfs_cpu_notifier);
+ ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
+ "staging/lustre/cfe:dead", NULL,
+ cfs_cpu_dead);
+ if (ret < 0)
+ goto failed;
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "staging/lustre/cfe:online",
+ cfs_cpu_online, NULL);
+ if (ret < 0)
+ goto failed;
+ lustre_cpu_online = ret;
#endif
+ ret = -EINVAL;
if (*cpu_pattern) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
@@ -1075,7 +1080,7 @@ cfs_cpu_init(void)
failed:
cfs_cpu_fini();
- return -1;
+ return ret;
}
#endif
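The conversion above follows the standard cpuhp pattern: a fixed state for the "dead" callback plus a dynamically allocated CPUHP_AP_ONLINE_DYN state for the "online" callback, removed again on exit. A condensed sketch using only the dynamic state (names are illustrative):

#include <linux/cpuhotplug.h>

static enum cpuhp_state example_online_state;

static int example_cpu_online(unsigned int cpu)
{
	/* refresh per-CPU bookkeeping here */
	return 0;
}

static int example_hotplug_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "example:online",
					example_cpu_online, NULL);
	if (ret < 0)
		return ret;

	example_online_state = ret;	/* dynamic states return their id */
	return 0;
}

static void example_hotplug_exit(void)
{
	cpuhp_remove_state_nocalls(example_online_state);
}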
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 0e45d8fc4d7c..65bf0c401b44 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -57,9 +57,6 @@ static void ll_release(struct dentry *de)
LASSERT(de);
lld = ll_d2d(de);
- if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
- return;
-
if (lld->lld_it) {
ll_intent_release(lld->lld_it);
kfree(lld->lld_it);
@@ -126,30 +123,13 @@ static int ll_ddelete(const struct dentry *de)
return 0;
}
-int ll_d_init(struct dentry *de)
+static int ll_d_init(struct dentry *de)
{
- CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, d_inode(de), d_count(de));
-
- if (!de->d_fsdata) {
- struct ll_dentry_data *lld;
-
- lld = kzalloc(sizeof(*lld), GFP_NOFS);
- if (likely(lld)) {
- spin_lock(&de->d_lock);
- if (likely(!de->d_fsdata)) {
- de->d_fsdata = lld;
- __d_lustre_invalidate(de);
- } else {
- kfree(lld);
- }
- spin_unlock(&de->d_lock);
- } else {
- return -ENOMEM;
- }
- }
- LASSERT(de->d_op == &ll_d_ops);
-
+ struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
+ if (unlikely(!lld))
+ return -ENOMEM;
+ lld->lld_invalid = 1;
+ de->d_fsdata = lld;
return 0;
}
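The new ll_d_init() relies on the ->d_init() hook, which the VFS calls while allocating a dentry so per-dentry data exists before the dentry becomes visible; the matching free happens in ->d_release(). A self-contained sketch of the pattern with made-up names:

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_dentry_data {
	unsigned int invalid;
};

static int example_d_init(struct dentry *de)
{
	struct example_dentry_data *ldd = kzalloc(sizeof(*ldd), GFP_KERNEL);

	if (!ldd)
		return -ENOMEM;

	ldd->invalid = 1;		/* new dentries start out invalid */
	de->d_fsdata = ldd;
	return 0;
}

static void example_d_release(struct dentry *de)
{
	kfree(de->d_fsdata);		/* paired with the ->d_init() allocation */
}

static const struct dentry_operations example_d_ops = {
	.d_init		= example_d_init,
	.d_release	= example_d_release,
};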
@@ -300,6 +280,7 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
}
const struct dentry_operations ll_d_ops = {
+ .d_init = ll_d_init,
.d_revalidate = ll_revalidate_nd,
.d_release = ll_release,
.d_delete = ll_ddelete,
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 2f46d475cd7d..065a9a7e120a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -769,7 +769,6 @@ int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */
-int ll_d_init(struct dentry *de);
extern const struct dentry_operations ll_d_ops;
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
@@ -1148,7 +1147,7 @@ dentry_may_statahead(struct inode *dir, struct dentry *dentry)
* 'lld_sa_generation == lli->lli_sa_generation'.
*/
ldd = ll_d2d(dentry);
- if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation)
+ if (ldd->lld_sa_generation == lli->lli_sa_generation)
return false;
return true;
@@ -1267,17 +1266,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
static inline int d_lustre_invalid(const struct dentry *dentry)
{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- return !lld || lld->lld_invalid;
-}
-
-static inline void __d_lustre_invalidate(struct dentry *dentry)
-{
- struct ll_dentry_data *lld = ll_d2d(dentry);
-
- if (lld)
- lld->lld_invalid = 1;
+ return ll_d2d(dentry)->lld_invalid;
}
/*
@@ -1293,7 +1282,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
- __d_lustre_invalidate(dentry);
+ ll_d2d(dentry)->lld_invalid = 1;
/*
* We should be careful about dentries created by d_obtain_alias().
* These dentries are not put in the dentry tree, instead they are
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index c63236580b0f..49a930f0fc5d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -169,22 +169,12 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
/* N.B. d_obtain_alias() drops inode ref on error */
result = d_obtain_alias(inode);
if (!IS_ERR(result)) {
- int rc;
-
- rc = ll_d_init(result);
- if (rc < 0) {
- dput(result);
- result = ERR_PTR(rc);
- } else {
- struct ll_dentry_data *ldd = ll_d2d(result);
-
- /*
- * Need to signal to the ll_intent_file_open that
- * we came from NFS and so opencache needs to be
- * enabled for this one
- */
- ldd->lld_nfs_dentry = 1;
- }
+ /*
+ * Need to signal to the ll_intent_file_open that
+ * we came from NFS and so opencache needs to be
+ * enabled for this one
+ */
+ ll_d2d(result)->lld_nfs_dentry = 1;
}
return result;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 9426759aedc9..a8f4e7fb0a46 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -432,17 +432,9 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
*/
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
{
- struct dentry *new;
- int rc;
-
if (inode) {
- new = ll_find_alias(inode, de);
+ struct dentry *new = ll_find_alias(inode, de);
if (new) {
- rc = ll_d_init(new);
- if (rc < 0) {
- dput(new);
- return ERR_PTR(rc);
- }
d_move(new, de);
iput(inode);
CDEBUG(D_DENTRY,
@@ -451,9 +443,6 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return new;
}
}
- rc = ll_d_init(de);
- if (rc < 0)
- return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
de, d_inode(de), d_count(de), de->d_flags);
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 4769a2230ae1..f1ee17f9ec0d 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -1519,9 +1519,7 @@ out_unplug:
* dentry_may_statahead().
*/
ldd = ll_d2d(*dentryp);
- /* ldd can be NULL if llite lookup failed. */
- if (ldd)
- ldd->lld_sa_generation = lli->lli_sa_generation;
+ ldd->lld_sa_generation = lli->lli_sa_generation;
sa_put(sai, entry);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 82c7c48aa619..cd77b55d3895 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -149,7 +149,6 @@ static const char *ll_get_link(struct dentry *dentry,
}
const struct inode_operations ll_fast_symlink_inode_operations = {
- .readlink = generic_readlink,
.setattr = ll_setattr,
.get_link = ll_get_link,
.getattr = ll_getattr,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 0b6d388d8aa4..697cbfbe9374 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
"page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
vmf->page, vmf->page->mapping, vmf->page->index,
(long)vmf->page->flags, page_count(vmf->page),
- page_private(vmf->page), vmf->virtual_address);
+ page_private(vmf->page), (void *)vmf->address);
if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
cfio->ft_flags |= VM_FAULT_LOCKED;
@@ -1025,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
}
if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
return -EFAULT;
}
if (cfio->ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
return -ENOMEM;
}
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 6620d96ee44d..ffb8fa72c3da 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,16 +21,12 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/bcm2048/Kconfig"
-source "drivers/staging/media/cec/Kconfig"
-
source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
-source "drivers/staging/media/pulse8-cec/Kconfig"
-
source "drivers/staging/media/s5p-cec/Kconfig"
# Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 906257e94dda..a28e82cf6447 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,9 +1,7 @@
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
-obj-$(CONFIG_MEDIA_CEC) += cec/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index c5116c058cea..37bd439ee08b 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
/*
@@ -999,7 +995,7 @@ static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
if (!wait_for_completion_timeout(&bdev->compl,
- msecs_to_jiffies(timeout)))
+ msecs_to_jiffies(timeout)))
dev_err(&bdev->client->dev, "IRQ timeout.\n");
if (value)
@@ -2059,67 +2055,67 @@ property_signed_read(fm_rssi, int, "%d")
DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
static struct device_attribute attrs[] = {
- __ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+ __ATTR(power_state, 0644, bcm2048_power_state_read,
bcm2048_power_state_write),
- __ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+ __ATTR(mute, 0644, bcm2048_mute_read,
bcm2048_mute_write),
- __ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+ __ATTR(audio_route, 0644, bcm2048_audio_route_read,
bcm2048_audio_route_write),
- __ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+ __ATTR(dac_output, 0644, bcm2048_dac_output_read,
bcm2048_dac_output_write),
- __ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+ __ATTR(fm_hi_lo_injection, 0644,
bcm2048_fm_hi_lo_injection_read,
bcm2048_fm_hi_lo_injection_write),
- __ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+ __ATTR(fm_frequency, 0644, bcm2048_fm_frequency_read,
bcm2048_fm_frequency_write),
- __ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+ __ATTR(fm_af_frequency, 0644,
bcm2048_fm_af_frequency_read,
bcm2048_fm_af_frequency_write),
- __ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+ __ATTR(fm_deemphasis, 0644, bcm2048_fm_deemphasis_read,
bcm2048_fm_deemphasis_write),
- __ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+ __ATTR(fm_rds_mask, 0644, bcm2048_fm_rds_mask_read,
bcm2048_fm_rds_mask_write),
- __ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+ __ATTR(fm_best_tune_mode, 0644,
bcm2048_fm_best_tune_mode_read,
bcm2048_fm_best_tune_mode_write),
- __ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_rssi_threshold, 0644,
bcm2048_fm_search_rssi_threshold_read,
bcm2048_fm_search_rssi_threshold_write),
- __ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_mode_direction, 0644,
bcm2048_fm_search_mode_direction_read,
bcm2048_fm_search_mode_direction_write),
- __ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+ __ATTR(fm_search_tune_mode, 0644,
bcm2048_fm_search_tune_mode_read,
bcm2048_fm_search_tune_mode_write),
- __ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+ __ATTR(rds, 0644, bcm2048_rds_read,
bcm2048_rds_write),
- __ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+ __ATTR(rds_b_block_mask, 0644,
bcm2048_rds_b_block_mask_read,
bcm2048_rds_b_block_mask_write),
- __ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+ __ATTR(rds_b_block_match, 0644,
bcm2048_rds_b_block_match_read,
bcm2048_rds_b_block_match_write),
- __ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+ __ATTR(rds_pi_mask, 0644, bcm2048_rds_pi_mask_read,
bcm2048_rds_pi_mask_write),
- __ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+ __ATTR(rds_pi_match, 0644, bcm2048_rds_pi_match_read,
bcm2048_rds_pi_match_write),
- __ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+ __ATTR(rds_wline, 0644, bcm2048_rds_wline_read,
bcm2048_rds_wline_write),
- __ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
- __ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
- __ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
- __ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
- __ATTR(region_bottom_frequency, S_IRUGO,
+ __ATTR(rds_pi, 0444, bcm2048_rds_pi_read, NULL),
+ __ATTR(rds_rt, 0444, bcm2048_rds_rt_read, NULL),
+ __ATTR(rds_ps, 0444, bcm2048_rds_ps_read, NULL),
+ __ATTR(fm_rds_flags, 0444, bcm2048_fm_rds_flags_read, NULL),
+ __ATTR(region_bottom_frequency, 0444,
bcm2048_region_bottom_frequency_read, NULL),
- __ATTR(region_top_frequency, S_IRUGO,
+ __ATTR(region_top_frequency, 0444,
bcm2048_region_top_frequency_read, NULL),
- __ATTR(fm_carrier_error, S_IRUGO,
+ __ATTR(fm_carrier_error, 0444,
bcm2048_fm_carrier_error_read, NULL),
- __ATTR(fm_rssi, S_IRUGO,
+ __ATTR(fm_rssi, 0444,
bcm2048_fm_rssi_read, NULL),
- __ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+ __ATTR(region, 0644, bcm2048_region_read,
bcm2048_region_write),
- __ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+ __ATTR(rds_data, 0444, bcm2048_rds_data_read, NULL),
};
static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
@@ -2204,7 +2200,7 @@ static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
}
/* interruptible_sleep_on(&bdev->read_queue); */
if (wait_event_interruptible(bdev->read_queue,
- bdev->rds_data_available) < 0) {
+ bdev->rds_data_available) < 0) {
retval = -EINTR;
goto done;
}
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
index 4c90a32db795..4d950c1e2e8b 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.h
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -14,11 +14,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef BCM2048_H
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
deleted file mode 100644
index 6e12d41b1f86..000000000000
--- a/drivers/staging/media/cec/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config MEDIA_CEC
- bool "CEC API (EXPERIMENTAL)"
- depends on MEDIA_SUPPORT
- select MEDIA_CEC_EDID
- ---help---
- Enable the CEC API.
-
-config MEDIA_CEC_DEBUG
- bool "CEC debugfs interface (EXPERIMENTAL)"
- depends on MEDIA_CEC && DEBUG_FS
- ---help---
- Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
deleted file mode 100644
index 13224694a8ae..000000000000
--- a/drivers/staging/media/cec/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-The reason why cec.c is still in staging is that I would like
-to have a bit more confidence in the uABI. The kABI is fine,
-no problem there, but I would like to let the public API mature
-a bit.
-
-Once I'm confident that I didn't miss anything then the cec.c source
-can move to drivers/media and the linux/cec.h and linux/cec-funcs.h
-headers can move to uapi/linux and added to uapi/linux/Kbuild to make
-them public.
-
-Hopefully this will happen later in 2016.
-
-Other TODOs:
-
-- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
-- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
- Applications should be able to choose this when calling S_LOG_ADDRS.
-- If the reply field of cec_msg is set then when the reply arrives it
- is only sent to the filehandle that transmitted the original message
- and not to any followers. Should this behavior change or perhaps
- controlled through a cec_msg flag?
-- Should CEC_LOG_ADDR_TYPE_SPECIFIC be replaced by TYPE_2ND_TV and TYPE_PROCESSOR?
- And also TYPE_SWITCH and TYPE_CDC_ONLY in addition to the TYPE_UNREGISTERED?
- This should give the framework more information about the device type
- since SPECIFIC and UNREGISTERED give no useful information.
-- Once this is out of staging this should no longer be a separate
- config option, instead it should be selected by drivers that want it.
-- Revisit the IS_REACHABLE(RC_CORE): perhaps the RC_CORE support should
- be enabled through a separate config option in drivers/media/Kconfig
- or rc/Kconfig?
-
-Hans Verkuil <hans.verkuil@cisco.com>
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
index c64515c644cd..3019c9ecd548 100644
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_VIDEO_DM365_VPFE) += \
+obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o
+
+davinci-vfpe-objs := \
dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 128662623ea8..5fbc2d447ff2 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -237,9 +237,8 @@ resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
((informat->width) * 256) / (outformat->width);
}
-void
-static resizer_enable_422_420_conversion(struct resizer_params *param,
- int index, bool en)
+static void resizer_enable_422_420_conversion(struct resizer_params *param,
+ int index, bool en)
{
param->rsz_rsc_param[index].cen = en;
param->rsz_rsc_param[index].yen = en;
@@ -490,7 +489,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
int line_len;
int ret;
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) {
dev_err(dev, "enable resizer - Resizer-A\n");
return -EINVAL;
}
@@ -502,7 +501,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
param->rsz_en[RSZ_B] = DISABLE;
param->oper_mode = RESIZER_MODE_CONTINIOUS;
- if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
struct v4l2_mbus_framefmt *outformat2;
param->rsz_en[RSZ_B] = ENABLE;
@@ -825,7 +824,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
.o_hsz = WIDTH_O - 1,
.v_dif = 256,
.v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
.h_dif = 256,
.h_typ_y = VPFE_RSZ_INTP_CUBIC,
.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -843,7 +842,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
.o_hsz = WIDTH_O - 1,
.v_dif = 256,
.v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
+ .v_typ_c = VPFE_RSZ_INTP_CUBIC,
.h_dif = 256,
.h_typ_y = VPFE_RSZ_INTP_CUBIC,
.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -1043,13 +1042,13 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
return;
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
val = vpss_dma_complete_interrupt();
if (val != 0 && val != 2)
return;
}
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
spin_lock(&video_out->dma_queue_lock);
vpfe_video_process_buffer_complete(video_out);
video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1059,7 +1058,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
/* If resizer B is enabled */
if (pipe->output_num > 1 && resizer->resizer_b.output ==
- RESIZER_OUPUT_MEMORY) {
+ RESIZER_OUTPUT_MEMORY) {
spin_lock(&video_out->dma_queue_lock);
vpfe_video_process_buffer_complete(video_out2);
video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1069,7 +1068,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
/* start HW if buffers are queued */
if (vpfe_video_is_pipe_ready(pipe) &&
- resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+ resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
resizer_enable(resizer, 1);
vpfe_ipipe_enable(vpfe_dev, 1);
vpfe_ipipeif_enable(vpfe_dev);
@@ -1237,8 +1236,8 @@ static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
struct resizer_params *param = &resizer->config;
int ret = 0;
- if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
- resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+ if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY ||
+ resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
ret = resizer_configure_in_single_shot_mode(resizer);
@@ -1263,7 +1262,7 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
if (&resizer->crop_resizer.subdev != sd)
return 0;
- if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
+ if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY)
return 0;
switch (enable) {
@@ -1724,7 +1723,7 @@ static int resizer_link_setup(struct media_entity *entity,
}
if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
return -EBUSY;
- resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
+ resizer->resizer_a.output = RESIZER_OUTPUT_MEMORY;
break;
default:
@@ -1749,7 +1748,7 @@ static int resizer_link_setup(struct media_entity *entity,
}
if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
return -EBUSY;
- resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
+ resizer->resizer_b.output = RESIZER_OUTPUT_MEMORY;
break;
default:
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
index 93b0f44030aa..00e64b0d0295 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -210,7 +210,7 @@ enum resizer_input_entity {
enum resizer_output_entity {
RESIZER_OUTPUT_NONE = 0,
- RESIZER_OUPUT_MEMORY = 1,
+ RESIZER_OUTPUT_MEMORY = 1,
};
struct dm365_resizer_device {
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index c34bf4621767..c27d7e9a1bdb 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -198,7 +198,7 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
return 0;
}
-/* checks wether pipeline is ready for enabling */
+/* checks whether pipeline is ready for enabling */
int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
{
int i;
@@ -1362,7 +1362,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
ret = vb2_queue_init(q);
if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- return ret;
+ goto unlock_out;
}
fh->io_allowed = 1;
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index 6879c4651b46..25b7e7ccf554 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -38,19 +38,6 @@ config LIRC_SASEM
help
Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
-config LIRC_SERIAL
- tristate "Homebrew Serial Port Receiver"
- depends on LIRC
- help
- Driver for Homebrew Serial Port Receivers
-
-config LIRC_SERIAL_TRANSMITTER
- bool "Serial Port Transmitter"
- default y
- depends on LIRC_SERIAL
- help
- Serial Port Transmitter support
-
config LIRC_SIR
tristate "Built-in SIR IrDA port"
depends on LIRC
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index 5430adf0475d..7f919eab1989 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -7,6 +7,5 @@ obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
-obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o
obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 198a8057f2f1..1e650fba4a92 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -334,7 +334,7 @@ static int send_packet(struct imon_context *context)
context->tx_urb->actual_length = 0;
- init_completion(&context->tx.finished);
+ reinit_completion(&context->tx.finished);
atomic_set(&context->tx.busy, 1);
retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
@@ -408,9 +408,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user(buf, n_bytes);
if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
+ mutex_unlock(&context->ctx_lock);
+ return PTR_ERR(data_buf);
}
memcpy(context->tx.data_buf, data_buf, n_bytes);
@@ -497,6 +496,8 @@ static int ir_open(void *data)
context->rx.initial_space = 1;
context->rx.prev_bit = 0;
+ init_completion(&context->tx.finished);
+
context->ir_isopen = 1;
dev_info(context->driver->dev, "IR port opened\n");
@@ -930,7 +931,7 @@ static void imon_disconnect(struct usb_interface *interface)
/* Abort ongoing write */
if (atomic_read(&context->tx.busy)) {
usb_kill_urb(context->tx_urb);
- complete_all(&context->tx.finished);
+ complete(&context->tx.finished);
}
context->dev_present = 0;
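The lirc_imon changes above follow the standard lifecycle for a struct completion: initialize it once when the device is opened, re-arm it with reinit_completion() before each transmission, and signal it exactly once with complete() when the transfer finishes or is killed. A minimal sketch of that pattern, with hypothetical names not taken from the driver:

#include <linux/completion.h>

struct tx_ctx {
	struct completion finished;
};

/* open path: set up the completion once for the context's lifetime */
static void tx_ctx_open(struct tx_ctx *ctx)
{
	init_completion(&ctx->finished);
}

/* per transfer: re-arm, kick off the hardware, then wait for the done path */
static int tx_ctx_send(struct tx_ctx *ctx)
{
	reinit_completion(&ctx->finished);
	/* ... submit URB / start hardware here ... */
	return wait_for_completion_interruptible(&ctx->finished);
}

/* completion/teardown path: wake the single waiter */
static void tx_ctx_done(struct tx_ctx *ctx)
{
	complete(&ctx->finished);
}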
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 920c4a1290f4..b0c176e14b6b 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -386,9 +386,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user(buf, n_bytes);
if (IS_ERR(data_buf)) {
- retval = PTR_ERR(data_buf);
- data_buf = NULL;
- goto exit;
+ mutex_unlock(&context->ctx_lock);
+ return PTR_ERR(data_buf);
}
memcpy(context->tx.data_buf, data_buf, n_bytes);
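Both vfd_write() fixes above correct the same error path: memdup_user() returns an ERR_PTR() on failure, so the right response is to drop the mutex and return PTR_ERR() directly rather than jump to a cleanup label written for the success case. A generic sketch of the corrected shape (hypothetical names):

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct mutex *lock, const char __user *buf, size_t n)
{
	char *kbuf;

	mutex_lock(lock);

	kbuf = memdup_user(buf, n);
	if (IS_ERR(kbuf)) {
		/* memdup_user() left nothing behind, so only the lock needs undoing */
		mutex_unlock(lock);
		return PTR_ERR(kbuf);
	}

	/* ... consume kbuf ... */

	kfree(kbuf);
	mutex_unlock(lock);
	return n;
}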
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
deleted file mode 100644
index b798b311d32c..000000000000
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * lirc_serial.c
- *
- * lirc_serial - Device driver that records pulse- and pause-lengths
- * (space-lengths) between DDCD events on a serial port.
- *
- * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
- * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
- * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
- * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
- * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-/*
- * Steve's changes to improve transmission fidelity:
- * - for systems with the rdtsc instruction and the clock counter, a
- * send_pulse that times the pulses directly using the counter.
- * This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is
- * not needed. Measurement shows very stable waveform, even where
- * PCI activity slows the access to the UART, which trips up other
- * versions.
- * - For other systems, non-integer-microsecond pulse/space lengths,
- * done using fixed point binary. So, much more accurate carrier
- * frequency.
- * - fine tuned transmitter latency, taking advantage of fractional
- * microseconds in previous change
- * - Fixed bug in the way transmitter latency was accounted for by
- * tuning the pulse lengths down - the send_pulse routine ignored
- * this overhead as it timed the overall pulse length - so the
- * pulse frequency was right but overall pulse length was too
- * long. Fixed by accounting for latency on each pulse/space
- * iteration.
- *
- * Steve Davies <steve@daviesfam.org> July 2001
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-
-/* From Intel IXP42X Developer's Manual (#252480-005): */
-/* ftp://download.intel.com/design/network/manuals/25248005.pdf */
-#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */
-#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define LIRC_DRIVER_NAME "lirc_serial"
-
-struct lirc_serial {
- int signal_pin;
- int signal_pin_change;
- u8 on;
- u8 off;
- long (*send_pulse)(unsigned long length);
- void (*send_space)(long length);
- int features;
- spinlock_t lock;
-};
-
-#define LIRC_HOMEBREW 0
-#define LIRC_IRDEO 1
-#define LIRC_IRDEO_REMOTE 2
-#define LIRC_ANIMAX 3
-#define LIRC_IGOR 4
-#define LIRC_NSLU2 5
-
-/*** module parameters ***/
-static int type;
-static int io;
-static int irq;
-static bool iommap;
-static int ioshift;
-static bool softcarrier = true;
-static bool share_irq;
-static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
-static bool txsense; /* 0 = active high, 1 = active low */
-
-/* forward declarations */
-static long send_pulse_irdeo(unsigned long length);
-static long send_pulse_homebrew(unsigned long length);
-static void send_space_irdeo(long length);
-static void send_space_homebrew(long length);
-
-static struct lirc_serial hardware[] = {
- [LIRC_HOMEBREW] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-
- [LIRC_IRDEO] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = UART_MCR_OUT2,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_IRDEO_REMOTE] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = send_pulse_irdeo,
- .send_space = send_space_irdeo,
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
- },
-
- [LIRC_ANIMAX] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
- .signal_pin = UART_MSR_DCD,
- .signal_pin_change = UART_MSR_DDCD,
- .on = 0,
- .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
- .send_pulse = NULL,
- .send_space = NULL,
- .features = LIRC_CAN_REC_MODE2
- },
-
- [LIRC_IGOR] = {
- .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
- .signal_pin = UART_MSR_DSR,
- .signal_pin_change = UART_MSR_DDSR,
- .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
- .off = (UART_MCR_RTS | UART_MCR_OUT2),
- .send_pulse = send_pulse_homebrew,
- .send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
- .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
- LIRC_CAN_SET_SEND_CARRIER |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
- .features = LIRC_CAN_REC_MODE2
-#endif
- },
-};
-
-#define RS_ISR_PASS_LIMIT 256
-
-/*
- * A long pulse code from a remote might take up to 300 bytes. The
- * daemon should read the bytes as soon as they are generated, so take
- * the number of keys you think you can push before the daemon runs
- * and multiply by 300. The driver will warn you if you overrun this
- * buffer. If you have a slow computer or non-busmastering IDE disks,
- * maybe you will need to increase this.
- */
-
-/* This MUST be a power of two! It has to be larger than 1 as well. */
-
-#define RBUF_LEN 256
-
-static ktime_t lastkt;
-
-static struct lirc_buffer rbuf;
-
-static unsigned int freq = 38000;
-static unsigned int duty_cycle = 50;
-
-/* Initialized in init_timing_params() */
-static unsigned long period;
-static unsigned long pulse_width;
-static unsigned long space_width;
-
-#if defined(__i386__)
-/*
- * From:
- * Linux I/O port programming mini-HOWTO
- * Author: Riku Saikkonen <Riku.Saikkonen@hut.fi>
- * v, 28 December 1997
- *
- * [...]
- * Actually, a port I/O instruction on most ports in the 0-0x3ff range
- * takes almost exactly 1 microsecond, so if you're, for example, using
- * the parallel port directly, just do additional inb()s from that port
- * to delay.
- * [...]
- */
-/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from
- * comment above plus trimming to match actual measured frequency.
- * This will be sensitive to cpu speed, though hopefully most of the 1.5us
- * is spent in the uart access. Still - for reference test machine was a
- * 1.13GHz Athlon system - Steve
- */
-
-/*
- * changed from 400 to 450 as this works better on slower machines;
- * faster machines will use the rdtsc code anyway
- */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 450
-
-#else
-
-/* does anybody have information on other platforms ? */
-/* 256 = 1<<8 */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 256
-
-#endif /* __i386__ */
-/*
- * FIXME: should we be using hrtimers instead of this
- * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense?
- */
-
-/* fetch serial input packet (1 byte) from register offset */
-static u8 sinp(int offset)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- return inb(io + offset);
-}
-
-/* write serial output packet (1 byte) of value to register offset */
-static void soutp(int offset, u8 value)
-{
- if (iommap)
- /* the register is memory-mapped */
- offset <<= ioshift;
-
- outb(value, io + offset);
-}
-
-static void on(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].off);
- else
- soutp(UART_MCR, hardware[type].on);
-}
-
-static void off(void)
-{
- if (txsense)
- soutp(UART_MCR, hardware[type].on);
- else
- soutp(UART_MCR, hardware[type].off);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
- while (usecs > MAX_UDELAY_US) {
- udelay(MAX_UDELAY_US);
- usecs -= MAX_UDELAY_US;
- }
- udelay(usecs);
-}
-
-#ifdef USE_RDTSC
-/*
- * This is an overflow/precision juggle, complicated in that we can't
- * do long long divide in the kernel
- */
-
-/*
- * When we use the rdtsc instruction to measure clocks, we keep the
- * pulse and space widths as clock cycles. As this is CPU speed
- * dependent, the widths must be calculated in init_port and ioctl
- * time
- */
-
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
- __u64 loops_per_sec, work;
-
- duty_cycle = new_duty_cycle;
- freq = new_freq;
-
- loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
- loops_per_sec *= HZ;
-
- /* How many clocks in a microsecond?, avoiding long long divide */
- work = loops_per_sec;
- work *= 4295; /* 4295 = 2^32 / 1e6 */
-
- /*
- * Carrier period in clocks, approach good up to 32GHz clock,
- * gets carrier frequency within 8Hz
- */
- period = loops_per_sec >> 3;
- period /= (freq >> 3);
-
- /* Derive pulse and space from the period */
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- pr_debug("in init_timing_params, freq=%d, duty_cycle=%d, clk/jiffy=%ld, pulse=%ld, space=%ld, conv_us_to_clocks=%ld\n",
- freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
- pulse_width, space_width, conv_us_to_clocks);
- return 0;
-}
-#else /* ! USE_RDTSC */
-static int init_timing_params(unsigned int new_duty_cycle,
- unsigned int new_freq)
-{
-/*
- * period, pulse/space width are kept with 8 binary places -
- * IE multiplied by 256.
- */
- if (256 * 1000000L / new_freq * new_duty_cycle / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
- LIRC_SERIAL_TRANSMITTER_LATENCY)
- return -EINVAL;
- duty_cycle = new_duty_cycle;
- freq = new_freq;
- period = 256 * 1000000L / freq;
- pulse_width = period * duty_cycle / 100;
- space_width = period - pulse_width;
- pr_debug("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
- freq, pulse_width, space_width);
- return 0;
-}
-#endif /* USE_RDTSC */
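Both init_timing_params() variants above keep period, pulse_width and space_width in 8.8 fixed point — microseconds multiplied by 256 — so fractional-microsecond carrier periods survive integer arithmetic. As a worked example for the default 38 kHz carrier at 50% duty cycle (a stand-alone user-space check, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int freq = 38000, duty_cycle = 50;

	/* all three values are in units of 1/256 us */
	unsigned long period = 256 * 1000000L / freq;		/* 6736, i.e. ~26.31 us */
	unsigned long pulse_width = period * duty_cycle / 100;	/* 3368, i.e. ~13.16 us */
	unsigned long space_width = period - pulse_width;	/* 3368 */

	printf("period=%lu pulse=%lu space=%lu (1/256 us units)\n",
	       period, pulse_width, space_width);
	printf("carrier period = %.2f us\n", period / 256.0);
	return 0;
}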
-
-
-/* return value: space length delta */
-
-static long send_pulse_irdeo(unsigned long length)
-{
- long rawbits, ret;
- int i;
- unsigned char output;
- unsigned char chunk, shifted;
-
- /* how many bits have to be sent ? */
- rawbits = length * 1152 / 10000;
- if (duty_cycle > 50)
- chunk = 3;
- else
- chunk = 1;
- for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
- shifted = chunk << (i * 3);
- shifted >>= 1;
- output &= (~shifted);
- i++;
- if (i == 3) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_THRE))
- ;
- output = 0x7f;
- i = 0;
- }
- }
- if (i != 0) {
- soutp(UART_TX, output);
- while (!(sinp(UART_LSR) & UART_LSR_TEMT))
- ;
- }
-
- if (i == 0)
- ret = (-rawbits) * 10000 / 1152;
- else
- ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152;
-
- return ret;
-}
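send_pulse_irdeo() above generates its carrier by clocking bytes out of the UART at 115200 baud, where one bit time is 1/115200 s, about 8.68 us; a pulse of length microseconds therefore spans length * 115200 / 10^6 = length * 1152 / 10000 bit times, which is exactly the rawbits expression. A quick stand-alone check of that conversion (illustrative only):

#include <stdio.h>

int main(void)
{
	long length_us = 600;	/* example pulse length in microseconds */
	long rawbits = length_us * 1152 / 10000;

	printf("%ld us -> %ld bit times (one bit = %.2f us at 115200 baud)\n",
	       length_us, rawbits, 1000000.0 / 115200);
	return 0;
}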
-
-/* Version using udelay() */
-
-/*
- * here we use fixed point arithmetic, with 8
- * fractional bits. that gets us within 0.1% or so of the right average
- * frequency, albeit with some jitter in pulse length - Steve
- *
- * This should use ndelay instead.
- */
-
-/* To match 8 fractional bits used for pulse/space length */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
- int flag;
- unsigned long actual, target, d;
-
- length <<= 8;
-
- actual = 0; target = 0; flag = 0;
- while (actual < length) {
- if (flag) {
- off();
- target += space_width;
- } else {
- on();
- target += pulse_width;
- }
- d = (target - actual -
- LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8;
- /*
- * Note - we've checked in ioctl that the pulse/space
- * widths are big enough so that d is > 0
- */
- udelay(d);
- actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY;
- flag = !flag;
- }
- return (actual-length) >> 8;
-}
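The "(... + 128) >> 8" in the delay calculation above is round-to-nearest when converting an 8.8 fixed-point value back to whole microseconds: 128 is 0.5 in that representation. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long d = 3456;	/* 13.5 us in 8.8 fixed point */

	printf("truncated: %lu us, rounded: %lu us\n", d >> 8, (d + 128) >> 8);
	return 0;
}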
-
-static long send_pulse_homebrew(unsigned long length)
-{
- if (length <= 0)
- return 0;
-
- if (softcarrier)
- return send_pulse_homebrew_softcarrier(length);
-
- on();
- safe_udelay(length);
- return 0;
-}
-
-static void send_space_irdeo(long length)
-{
- if (length <= 0)
- return;
-
- safe_udelay(length);
-}
-
-static void send_space_homebrew(long length)
-{
- off();
- if (length <= 0)
- return;
- safe_udelay(length);
-}
-
-static void rbwrite(int l)
-{
- if (lirc_buffer_full(&rbuf)) {
- /* no new signals will be accepted */
- pr_debug("Buffer overrun\n");
- return;
- }
- lirc_buffer_write(&rbuf, (void *)&l);
-}
-
-static void frbwrite(int l)
-{
- /* simple noise filter */
- static int pulse, space;
- static unsigned int ptr;
-
- if (ptr > 0 && (l & PULSE_BIT)) {
- pulse += l & PULSE_MASK;
- if (pulse > 250) {
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- return;
- }
- if (!(l & PULSE_BIT)) {
- if (ptr == 0) {
- if (l > 20000) {
- space = l;
- ptr++;
- return;
- }
- } else {
- if (l > 20000) {
- space += pulse;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- space += l;
- if (space > PULSE_MASK)
- space = PULSE_MASK;
- pulse = 0;
- return;
- }
- rbwrite(space);
- rbwrite(pulse | PULSE_BIT);
- ptr = 0;
- pulse = 0;
- }
- }
- rbwrite(l);
-}
-
-static irqreturn_t lirc_irq_handler(int i, void *blah)
-{
- ktime_t kt;
- int counter, dcd;
- u8 status;
- ktime_t delkt;
- int data;
- static int last_dcd = -1;
-
- if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
- /* not our interrupt */
- return IRQ_NONE;
- }
-
- counter = 0;
- do {
- counter++;
- status = sinp(UART_MSR);
- if (counter > RS_ISR_PASS_LIMIT) {
- pr_warn("AIEEEE: We're caught!\n");
- break;
- }
- if ((status & hardware[type].signal_pin_change)
- && sense != -1) {
- /* get current time */
- kt = ktime_get();
-
- /* New mode, written by Trent Piepho
- <xyzzy@u.washington.edu>. */
-
- /*
- * The old format was not very portable.
- * We now use an int to pass pulses
- * and spaces to user space.
- *
- * If PULSE_BIT is set a pulse has been
- * received, otherwise a space has been
- * received. The driver needs to know if your
- * receiver is active high or active low, or
- * the space/pulse sense could be
- * inverted. The bits denoted by PULSE_MASK are
- * the length in microseconds. Lengths greater
- * than or equal to 16 seconds are clamped to
- * PULSE_MASK. All other bits are unused.
- * This is a much simpler interface for user
- * programs, as well as eliminating "out of
- * phase" errors with space/pulse
- * autodetection.
- */
-
- /* calc time since last interrupt in microseconds */
- dcd = (status & hardware[type].signal_pin) ? 1 : 0;
-
- if (dcd == last_dcd) {
- pr_warn("ignoring spike: %d %d %llx %llx\n",
- dcd, sense, ktime_to_us(kt),
- ktime_to_us(lastkt));
- continue;
- }
-
- delkt = ktime_sub(kt, lastkt);
- if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
- data = PULSE_MASK; /* really long time */
- if (!(dcd^sense)) {
- /* sanity check */
- pr_warn("AIEEEE: %d %d %llx %llx\n",
- dcd, sense, ktime_to_us(kt),
- ktime_to_us(lastkt));
- /*
- * detecting pulse while this
- * MUST be a space!
- */
- sense = sense ? 0 : 1;
- }
- } else
- data = (int) ktime_to_us(delkt);
- frbwrite(dcd^sense ? data : (data|PULSE_BIT));
- lastkt = kt;
- last_dcd = dcd;
- wake_up_interruptible(&rbuf.wait_poll);
- }
- } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
- return IRQ_HANDLED;
-}
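The long comment in the interrupt handler describes the format handed to user space: each read() yields 32-bit values in which PULSE_BIT marks a pulse and the PULSE_MASK bits carry the length in microseconds, clamped at roughly 16 seconds. A minimal mode2-style reader written against that description — a sketch that assumes the usual <linux/lirc.h> definitions and a /dev/lirc0 node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/lirc.h>

#ifndef PULSE_BIT
#define PULSE_BIT	0x01000000
#define PULSE_MASK	0x00FFFFFF
#endif

int main(void)
{
	unsigned int sample;
	int fd = open("/dev/lirc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/lirc0");
		return 1;
	}
	while (read(fd, &sample, sizeof(sample)) == sizeof(sample))
		printf("%s %u\n", (sample & PULSE_BIT) ? "pulse" : "space",
		       sample & PULSE_MASK);
	close(fd);
	return 0;
}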
-
-
-static int hardware_init_port(void)
-{
- u8 scratch, scratch2, scratch3;
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/serial/8250.c
- */
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
-
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- /* Set line for power source */
- off();
-
- /* Clear registers again to be sure. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- switch (type) {
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- /* setup port to 7N1 @ 115200 Baud */
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
- /* Set DLAB 0 + 7N1 */
- soutp(UART_LCR, UART_LCR_WLEN7);
- /* THR interrupt already disabled at this point */
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int lirc_serial_probe(struct platform_device *dev)
-{
- int i, nlow, nhigh, result;
-
- result = devm_request_irq(&dev->dev, irq, lirc_irq_handler,
- (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, &hardware);
- if (result < 0) {
- if (result == -EBUSY)
- dev_err(&dev->dev, "IRQ %d busy\n", irq);
- else if (result == -EINVAL)
- dev_err(&dev->dev, "Bad irq number or handler\n");
- return result;
- }
-
- /* Reserve io region. */
- /*
- * Future MMAP-Developers: Attention!
- * For memory mapped I/O you *might* need to use ioremap() first,
- * for the NSLU2 it's done in boot code.
- */
- if (((iommap)
- && (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
- LIRC_DRIVER_NAME) == NULL))
- || ((!iommap)
- && (devm_request_region(&dev->dev, io, 8,
- LIRC_DRIVER_NAME) == NULL))) {
- dev_err(&dev->dev, "port %04x already in use\n", io);
- dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
- dev_warn(&dev->dev,
- "or compile the serial port driver as module and\n");
- dev_warn(&dev->dev, "make sure this module is loaded first\n");
- return -EBUSY;
- }
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- /* Initialize pulse/space widths */
- init_timing_params(duty_cycle, freq);
-
- /* If pin is high, then this must be an active low receiver. */
- if (sense == -1) {
- /* wait 1/2 sec for the power supply */
- msleep(500);
-
- /*
- * probe 9 times every 0.04s, collect "votes" for
- * active high/low
- */
- nlow = 0;
- nhigh = 0;
- for (i = 0; i < 9; i++) {
- if (sinp(UART_MSR) & hardware[type].signal_pin)
- nlow++;
- else
- nhigh++;
- msleep(40);
- }
- sense = nlow >= nhigh ? 1 : 0;
- dev_info(&dev->dev, "auto-detected active %s receiver\n",
- sense ? "low" : "high");
- } else
- dev_info(&dev->dev, "Manually using active %s receiver\n",
- sense ? "low" : "high");
-
- dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
- return 0;
-}
-
-static int set_use_inc(void *data)
-{
- unsigned long flags;
-
- /* initialize timestamp */
- lastkt = ktime_get();
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-}
-
-static ssize_t lirc_write(struct file *file, const char __user *buf,
- size_t n, loff_t *ppos)
-{
- int i, count;
- unsigned long flags;
- long delta = 0;
- int *wbuf;
-
- if (!(hardware[type].features & LIRC_CAN_SEND_PULSE))
- return -EPERM;
-
- count = n / sizeof(int);
- if (n % sizeof(int) || count % 2 == 0)
- return -EINVAL;
- wbuf = memdup_user(buf, n);
- if (IS_ERR(wbuf))
- return PTR_ERR(wbuf);
- spin_lock_irqsave(&hardware[type].lock, flags);
- if (type == LIRC_IRDEO) {
- /* DTR, RTS down */
- on();
- }
- for (i = 0; i < count; i++) {
- if (i%2)
- hardware[type].send_space(wbuf[i] - delta);
- else
- delta = hardware[type].send_pulse(wbuf[i]);
- }
- off();
- spin_unlock_irqrestore(&hardware[type].lock, flags);
- kfree(wbuf);
- return n;
-}
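lirc_write() above accepts a whole number of 32-bit values alternating pulse and space lengths in microseconds and rejects even counts, so a transmission must both start and end with a pulse. A user-space sketch of a valid buffer (arbitrary example timings, device node assumed to be /dev/lirc0):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* pulse, space, pulse, space, pulse: odd count, pulse first and last */
	int buf[] = { 900, 450, 900, 450, 900 };
	int fd = open("/dev/lirc0", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/lirc0");
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) < 0)
		perror("write");
	close(fd);
	return 0;
}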
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- int result;
- u32 __user *uptr = (u32 __user *)arg;
- u32 value;
-
- switch (cmd) {
- case LIRC_GET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = put_user(LIRC_SEND2MODE
- (hardware[type].features&LIRC_CAN_SEND_MASK),
- uptr);
- if (result)
- return result;
- break;
-
- case LIRC_SET_SEND_MODE:
- if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- /* only LIRC_MODE_PULSE supported */
- if (value != LIRC_MODE_PULSE)
- return -EINVAL;
- break;
-
- case LIRC_GET_LENGTH:
- return -ENOIOCTLCMD;
-
- case LIRC_SET_SEND_DUTY_CYCLE:
- pr_debug("SET_SEND_DUTY_CYCLE\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value <= 0 || value > 100)
- return -EINVAL;
- return init_timing_params(value, freq);
-
- case LIRC_SET_SEND_CARRIER:
- pr_debug("SET_SEND_CARRIER\n");
- if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
- return -ENOIOCTLCMD;
-
- result = get_user(value, uptr);
- if (result)
- return result;
- if (value > 500000 || value < 20000)
- return -EINVAL;
- return init_timing_params(duty_cycle, value);
-
- default:
- return lirc_dev_fop_ioctl(filep, cmd, arg);
- }
- return 0;
-}
-
-static const struct file_operations lirc_fops = {
- .owner = THIS_MODULE,
- .write = lirc_write,
- .unlocked_ioctl = lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = lirc_ioctl,
-#endif
- .read = lirc_dev_fop_read,
- .poll = lirc_dev_fop_poll,
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = no_llseek,
-};
-
-static struct lirc_driver driver = {
- .name = LIRC_DRIVER_NAME,
- .minor = -1,
- .code_length = 1,
- .sample_rate = 0,
- .data = NULL,
- .add_to_buf = NULL,
- .rbuf = &rbuf,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
- .fops = &lirc_fops,
- .dev = NULL,
- .owner = THIS_MODULE,
-};
-
-static struct platform_device *lirc_serial_dev;
-
-static int lirc_serial_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* Disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Clear registers. */
- sinp(UART_LSR);
- sinp(UART_RX);
- sinp(UART_IIR);
- sinp(UART_MSR);
-
- return 0;
-}
-
-/* twisty maze... need a forward-declaration here... */
-static void lirc_serial_exit(void);
-
-static int lirc_serial_resume(struct platform_device *dev)
-{
- unsigned long flags;
- int result;
-
- result = hardware_init_port();
- if (result < 0)
- return result;
-
- spin_lock_irqsave(&hardware[type].lock, flags);
- /* Enable Interrupt */
- lastkt = ktime_get();
- soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
- off();
-
- lirc_buffer_clear(&rbuf);
-
- spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- return 0;
-}
-
-static struct platform_driver lirc_serial_driver = {
- .probe = lirc_serial_probe,
- .suspend = lirc_serial_suspend,
- .resume = lirc_serial_resume,
- .driver = {
- .name = "lirc_serial",
- },
-};
-
-static int __init lirc_serial_init(void)
-{
- int result;
-
- /* Init read buffer. */
- result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
- if (result < 0)
- return result;
-
- result = platform_driver_register(&lirc_serial_driver);
- if (result) {
- printk("lirc register returned %d\n", result);
- goto exit_buffer_free;
- }
-
- lirc_serial_dev = platform_device_alloc("lirc_serial", 0);
- if (!lirc_serial_dev) {
- result = -ENOMEM;
- goto exit_driver_unregister;
- }
-
- result = platform_device_add(lirc_serial_dev);
- if (result)
- goto exit_device_put;
-
- return 0;
-
-exit_device_put:
- platform_device_put(lirc_serial_dev);
-exit_driver_unregister:
- platform_driver_unregister(&lirc_serial_driver);
-exit_buffer_free:
- lirc_buffer_free(&rbuf);
- return result;
-}
-
-static void lirc_serial_exit(void)
-{
- platform_device_unregister(lirc_serial_dev);
- platform_driver_unregister(&lirc_serial_driver);
- lirc_buffer_free(&rbuf);
-}
-
-static int __init lirc_serial_init_module(void)
-{
- int result;
-
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IRDEO:
- case LIRC_IRDEO_REMOTE:
- case LIRC_ANIMAX:
- case LIRC_IGOR:
- /* if nothing specified, use ttyS0/com1 and irq 4 */
- io = io ? io : 0x3f8;
- irq = irq ? irq : 4;
- break;
- default:
- return -EINVAL;
- }
- if (!softcarrier) {
- switch (type) {
- case LIRC_HOMEBREW:
- case LIRC_IGOR:
- hardware[type].features &=
- ~(LIRC_CAN_SET_SEND_DUTY_CYCLE|
- LIRC_CAN_SET_SEND_CARRIER);
- break;
- }
- }
-
- /* make sure sense is either -1, 0, or 1 */
- if (sense != -1)
- sense = !!sense;
-
- result = lirc_serial_init();
- if (result)
- return result;
-
- driver.features = hardware[type].features;
- driver.dev = &lirc_serial_dev->dev;
- driver.minor = lirc_register_driver(&driver);
- if (driver.minor < 0) {
- pr_err("register_chrdev failed!\n");
- lirc_serial_exit();
- return driver.minor;
- }
- return 0;
-}
-
-static void __exit lirc_serial_exit_module(void)
-{
- lirc_unregister_driver(driver.minor);
- lirc_serial_exit();
- pr_debug("cleaned up module\n");
-}
-
-
-module_init(lirc_serial_init_module);
-module_exit(lirc_serial_exit_module);
-
-MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
-MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, "
- "Christoph Bartelmus, Andrei Tanas");
-MODULE_LICENSE("GPL");
-
-module_param(type, int, S_IRUGO);
-MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo,"
- " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug,"
- " 5 = NSLU2 RX:CTS2/TX:GreenLED)");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-/* some architectures (e.g. intel xscale) have memory mapped registers */
-module_param(iommap, bool, S_IRUGO);
-MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O"
- " (0 = no memory mapped io)");
-
-/*
- * some architectures (e.g. intel xscale) align the 8bit serial registers
- * on 32bit word boundaries.
- * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
- */
-module_param(ioshift, int, S_IRUGO);
-MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(share_irq, bool, S_IRUGO);
-MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-
-module_param(sense, int, S_IRUGO);
-MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
- " (0 = active high, 1 = active low )");
-
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-module_param(txsense, bool, S_IRUGO);
-MODULE_PARM_DESC(txsense, "Sense of transmitter circuit"
- " (0 = active high, 1 = active low )");
-#endif
-
-module_param(softcarrier, bool, S_IRUGO);
-MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
diff --git a/drivers/staging/media/pulse8-cec/TODO b/drivers/staging/media/pulse8-cec/TODO
deleted file mode 100644
index fa6660245e5f..000000000000
--- a/drivers/staging/media/pulse8-cec/TODO
+++ /dev/null
@@ -1,52 +0,0 @@
-This driver needs to mature a bit more and another round of
-code cleanups.
-
-Otherwise it looks to be in good shape. And of course the fact
-that the CEC framework is in staging at the moment also prevents
-this driver from being mainlined.
-
-Some notes:
-
-1) Regarding the "autonomous" mode of the Pulse-Eight: currently this
-is disabled, but the idea is that this allows basic functionality
-when the PC is off, and it can wake-up the PC through USB.
-
-To prevent the device from going into autonomous mode the driver would
-have to send MSGCODE_SET_CONTROLLED 1 and then send a ping every
-30 seconds (in practice once every 15 seconds would be good). When
-powering off or going to standby send MSGCODE_SET_CONTROLLED 0 to
-turn the autonomous mode back on.
-
-This needs to be implemented in the driver. Autonomous mode was
-added in firmware v2.
-
-2) Writing to the EEPROM can only be done once every 10 seconds.
-
-3) To use this driver you also need to patch the inputattach utility,
-this patch will be submitted once this driver is moved out of staging.
-
-diff -urN linuxconsoletools-1.4.9/utils/inputattach.c linuxconsoletools-1.4.9.new/utils/inputattach.c
---- linuxconsoletools-1.4.9/utils/inputattach.c 2016-01-09 16:27:02.000000000 +0100
-+++ linuxconsoletools-1.4.9.new/utils/inputattach.c 2016-03-20 11:35:31.707788967 +0100
-@@ -861,6 +861,9 @@
- { "--wacom_iv", "-wacom_iv", "Wacom protocol IV tablet",
- B9600, CS8 | CRTSCTS,
- SERIO_WACOM_IV, 0x00, 0x00, 0, wacom_iv_init },
-+{ "--pulse8-cec", "-pulse8-cec", "Pulse Eight HDMI CEC dongle",
-+ B9600, CS8,
-+ SERIO_PULSE8_CEC, 0x00, 0x00, 0, NULL },
- { NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL }
- };
-
-diff -urN linuxconsoletools-1.4.9/utils/serio-ids.h linuxconsoletools-1.4.9.new/utils/serio-ids.h
---- linuxconsoletools-1.4.9/utils/serio-ids.h 2015-04-26 18:29:42.000000000 +0200
-+++ linuxconsoletools-1.4.9.new/utils/serio-ids.h 2016-03-20 11:41:00.153558539 +0100
-@@ -131,5 +131,8 @@
- #ifndef SERIO_EASYPEN
- # define SERIO_EASYPEN 0x3f
- #endif
-+#ifndef SERIO_PULSE8_CEC
-+# define SERIO_PULSE8_CEC 0x40
-+#endif
-
- #endif
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
index 0315fd7ad0f1..ddfd955da0d4 100644
--- a/drivers/staging/media/s5p-cec/Kconfig
+++ b/drivers/staging/media/s5p-cec/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SAMSUNG_S5P_CEC
tristate "Samsung S5P CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+ depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
---help---
This is a driver for Samsung S5P HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO
index f51d5268ac40..64f21bab38f5 100644
--- a/drivers/staging/media/s5p-cec/TODO
+++ b/drivers/staging/media/s5p-cec/TODO
@@ -1,7 +1,7 @@
-This driver depends on the CEC framework, which is currently in
-staging, so therefor this driver is in staging as well.
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+Samsung HDMI driver.
-In addition, this driver requires that userspace sets the physical
-address. However, this should be passed on from the corresponding
-samsung HDMI driver. It is very annoying if userspace has to do this,
-and other than USB CEC adapters this must be handled automatically.
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time, this driver
+has to remain in staging.
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index aef962b6af31..2a07968b5ac6 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -203,12 +203,11 @@ static int s5p_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
- 1, &pdev->dev);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
return ret;
@@ -230,7 +229,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
return 0;
}
-static int s5p_cec_runtime_suspend(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
@@ -238,7 +237,7 @@ static int s5p_cec_runtime_suspend(struct device *dev)
return 0;
}
-static int s5p_cec_runtime_resume(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
int ret;
@@ -262,6 +261,7 @@ static const struct of_device_id s5p_cec_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, s5p_cec_match);
static struct platform_driver s5p_cec_pdrv = {
.probe = s5p_cec_probe,
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
index 784d2c600aca..c04283db58d6 100644
--- a/drivers/staging/media/st-cec/Kconfig
+++ b/drivers/staging/media/st-cec/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_STI_HDMI_CEC
tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST)
+ depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_STI || COMPILE_TEST)
---help---
This is a driver for STIH4xx HDMI CEC interface. It uses the
generic CEC framework interface.
diff --git a/drivers/staging/media/st-cec/TODO b/drivers/staging/media/st-cec/TODO
new file mode 100644
index 000000000000..c61289742c5c
--- /dev/null
+++ b/drivers/staging/media/st-cec/TODO
@@ -0,0 +1,7 @@
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+ST HDMI driver.
+
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time, this driver
+has to remain in staging.
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
index b22394ac4ec4..3c25638a9610 100644
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#include <media/cec.h>
@@ -336,13 +335,12 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
- CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT,
- 1, &pdev->dev);
+ CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
return ret;
@@ -363,6 +361,7 @@ static const struct of_device_id stih_cec_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, stih_cec_match);
static struct platform_driver stih_cec_pdrv = {
.probe = stih_cec_probe,
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8130dfe89745..4971aa54756a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -770,6 +770,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
/* Initialize the device private structure. */
struct octeon_ethernet *priv = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -816,6 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
}
/* Initialize the device private structure. */
+ SET_NETDEV_DEV(dev, &pdev->dev);
priv = netdev_priv(dev);
priv->netdev = dev;
priv->of_node = cvm_oct_node_for_port(pip, interface,
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 29b9834870fd..27af86e05098 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -53,4 +53,4 @@ r8188eu-y := \
obj-$(CONFIG_R8188EU) := r8188eu.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include
+ccflags-y += -I$(srctree)/$(src)/include
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index cb18db74d78c..7101fcc8871b 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -17,5 +17,3 @@ obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
obj-$(CONFIG_RTL8192E) += rtl8192e/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
index a2c4fb4ba1af..176a4a2b8b20 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -16,5 +16,3 @@ r8192e_pci-objs := \
rtl_wx.o \
obj-$(CONFIG_RTL8192E) += r8192e_pci.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index d02bf58aea6d..8bcb9b71f764 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/unaligned.h>
+#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "cxgbit.h"
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b7d747e92c7a..da2c73a255de 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -23,7 +23,9 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
+#include <linux/delay.h>
#include <asm/unaligned.h>
+#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 4cf2c0f2ba2f..e0db2ceb0f87 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -1,6 +1,18 @@
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsi_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
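The iscsi_target header changes in this series all apply the same recipe: include the headers a prototype really depends on (linux/types.h for u32, scsi/iscsi_proto.h for itt_t, and so on) and forward-declare structures that only appear behind pointers, so each header compiles regardless of include order. The pattern in generic form (illustrative file and names, not part of the series):

/* example_api.h */
#ifndef EXAMPLE_API_H
#define EXAMPLE_API_H

#include <linux/types.h>	/* u32 */

/* only pointers to these appear below, so incomplete declarations suffice */
struct example_conn;
struct example_session;

extern int example_send(struct example_conn *, const void *, u32);
extern void example_close(struct example_session *);

#endif /* EXAMPLE_API_H */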
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index e116f0e845c0..903b667f8e01 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d22f7b96a06c..1b91c13cc965 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,6 +1,8 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#include <linux/types.h>
+
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
@@ -18,6 +20,9 @@
#define CHAP_STAGE_CLIENT_NRIC 4
#define CHAP_STAGE_SERVER_NR 5
+struct iscsi_node_auth;
+struct iscsi_conn;
+
extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
int *, int *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 923c032f0b95..bf40f03755dd 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -21,10 +21,11 @@
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
@@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
tpg_np_new = iscsit_tpg_add_network_portal(tpg,
&np->np_sockaddr, tpg_np, type);
- if (IS_ERR(tpg_np_new))
+ if (IS_ERR(tpg_np_new)) {
+ rc = PTR_ERR(tpg_np_new);
goto out;
+ }
} else {
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new) {
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 647d4a5dca52..173ddd93c757 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -16,8 +16,8 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
index 646429ac5a02..16edeeeb7777 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.h
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
+struct iscsi_cmd;
+struct iscsi_datain;
+
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index a0e2df9e8090..06dbff5cd520 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
+struct iscsi_cmd;
+struct iscsi_session;
+
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f9497fb2..60e69e2af6ed 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_session;
+
extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 9214c9dafa2b..fe9b7f1e44ac 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -17,6 +17,7 @@
******************************************************************************/
#include <linux/list.h>
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 2a3ebf118a34..54d36bd25bea 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -1,6 +1,16 @@
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsi_session;
+
extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
struct iscsi_cmd *, struct iscsi_datain_req *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index e24f1c7c5862..faf9ae014b30 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -17,6 +17,7 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 63f2501f3fe0..7965f1e86506 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 15f79a2ca34a..450f51deb2a2 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -20,6 +20,8 @@
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index b597aa2c61a1..0e1fd6cedd54 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -1,6 +1,13 @@
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
+#include <linux/types.h>
+
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 89d34bd6d87f..46388c9e08da 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -18,6 +18,8 @@
#include <linux/ctype.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index f021cbd330e5..53438bfca4c6 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,10 @@
#define DECIMAL 0
#define HEX 1
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index 0c69a46a62ec..79cdf06ade48 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,6 +1,11 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 0efa80bb8962..e65bf78ceef3 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/slab.h>
-
+#include <linux/uio.h> /* struct kvec */
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_parameters.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3f0813..9962ccf0ccd7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,6 +1,7 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <linux/types.h>
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
@@ -23,6 +24,11 @@ struct iscsi_param {
struct list_head p_list;
} ____cacheline_aligned;
+struct iscsi_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index d5b153751a8d..be1234362271 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -1,6 +1,9 @@
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
+#include <linux/types.h>
+#include <linux/cache.h>
+
/* struct iscsi_pdu->status */
#define DATAOUT_PDU_SENT 1
@@ -78,6 +81,8 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
+struct iscsi_cmd;
+
extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
index 142e992cb097..64cc5c07e47c 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.h
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -1,6 +1,12 @@
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_tmr_req;
+
extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
unsigned char *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 0814e5894a96..2e7e08dbda48 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
@@ -260,7 +260,6 @@ err_out:
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
- kfree(tpg);
return -ENOMEM;
}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da211920c18..ceba29851167 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -1,6 +1,15 @@
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsi_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 08217d62fb0d..c4eb141c6435 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -1,5 +1,6 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1f38177207e0..b5a1b4ccba12 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/percpu_ida.h>
+#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 995f1cb29d0e..8ff08856516a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -1,8 +1,16 @@
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
#define MARKER_SIZE 8
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 4346462094a1..a8a230b4e6b5 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,3 +1,7 @@
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 58bb6ed18185..e5c3e5f827d0 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
@@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
struct sbp_target_request *req;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe19003..f5e330099bfc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -26,8 +26,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
+#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/fcntl.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 9b250f9b33bf..c69c11baf07f 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
+#include <target/target_core_base.h>
+
/*
* INQUIRY response data, TPGS Field
*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2001005bef45..54b36c9835be 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -143,13 +143,13 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
pr_err("db_root: cannot open: %s\n", db_root_stage);
return -EINVAL;
}
- if (!S_ISDIR(fp->f_inode->i_mode)) {
- filp_close(fp, 0);
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
mutex_unlock(&g_tf_lock);
pr_err("db_root: not a directory: %s\n", db_root_stage);
return -EINVAL;
}
- filp_close(fp, 0);
+ filp_close(fp, NULL);
strncpy(db_root, db_root_stage, read_bytes);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6b423485c5d6..1ebd13ef7bd3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -33,6 +33,7 @@
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d545993df18b..87aa376a1a1a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
+#include <linux/uio.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 068966fce308..526595a072de 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
+#include <target/target_core_base.h>
+
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 01c2afd81500..718d3fcd3e7c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -1,6 +1,9 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
+#include <linux/atomic.h>
+#include <target/target_core_base.h>
+
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a9d61c..9ab7090f7c83 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,6 +1,11 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 47463c99c318..d761025144f9 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
- pr_err("LongIO and Obselete Bits set, returning"
- " ILLEGAL_REQUEST\n");
+ pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index e3d26e9126a0..847bd470339c 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -1,5 +1,9 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
/*
* PERSISTENT_RESERVE_OUT service action codes
*
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 6d2007e35df6..8a02fa47c7e8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -15,11 +15,12 @@
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
-#include <linux/device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
+#include <linux/cache.h> /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
+struct block_device;
struct scsi_device;
+struct Scsi_Host;
struct pscsi_plugin_task {
unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 24b36fd785f1..ddc216c9f1f6 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -26,7 +26,9 @@
#include <linux/string.h>
#include <linux/parser.h>
+#include <linux/highmem.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index cc46a6a89b38..91fc1a34791d 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -1,6 +1,10 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define RD_HBA_VERSION "v4.0"
#define RD_MCP_VERSION "4.0"
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 04f616b3ba0a..4879e70e2eef 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd66df93..1cadc9eefa21 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+ case TCM_TOO_MANY_TARGET_DESCS:
+ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+ case TCM_TOO_MANY_SEGMENT_DESCS:
+ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
},
+ [TCM_TOO_MANY_TARGET_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+ },
+ [TCM_TOO_MANY_SEGMENT_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+ },
[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
.key = ILLEGAL_REQUEST,
.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index bd6e78ba153d..97402856a8f0 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,6 +1,8 @@
#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H
+#include <target/target_core_base.h>
+
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignement
*/
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 2b3c8564ace8..8041710b6972 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -27,6 +27,7 @@
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
+#include <linux/highmem.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int ret;
+ sense_reason_t ret;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
@@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
cmd->se_cmd = NULL;
- kmem_cache_free(tcmu_cmd_cache, cmd);
-
return 0;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 094a1440eacb..d828b3b5000b 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
+#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -52,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- bool src)
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+ struct se_device **found_dev)
{
struct se_device *se_dev;
- unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (src)
- dev_wwn = &xop->dst_tid_wwn[0];
- else
- dev_wwn = &xop->src_tid_wwn[0];
-
mutex_lock(&g_device_mutex);
list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
@@ -77,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
if (rc != 0)
continue;
- if (src) {
- xop->dst_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
- " se_dev\n", xop->dst_dev);
- } else {
- xop->src_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
- " se_dev\n", xop->src_dev);
- }
+ *found_dev = se_dev;
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
rc = target_depend_item(&se_dev->dev_group.cg_item);
if (rc != 0) {
@@ -109,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- unsigned char *p, bool src)
+ unsigned char *p, unsigned short cscd_index)
{
unsigned char *desc = p;
unsigned short ript;
@@ -154,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
return -EINVAL;
}
- if (src) {
+ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+ "dest\n", cscd_index);
+ return 0;
+ }
+
+ if (cscd_index == xop->stdi) {
memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the source designator matches the local device
@@ -166,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
" received xop\n", xop->src_dev);
}
- } else {
+ }
+
+ if (cscd_index == xop->dtdi) {
memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
- * Determine if the destination designator matches the local device
+ * Determine if the destination designator matches the local
+ * device. If @cscd_index corresponds to both source (stdi) and
+ * destination (dtdi), or dtdi comes after stdi, then
+ * XCOL_DEST_RECV_OP wins.
*/
if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -189,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
- int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+ unsigned short cscd_index = 0;
unsigned short start = 0;
- bool src = true;
*sense_ret = TCM_INVALID_PARAMETER_LIST;
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
return -EINVAL;
}
- if (tdll > 64) {
+ if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
pr_err("XCOPY target descriptor supports a maximum"
" two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
return -EINVAL;
}
/*
@@ -214,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
while (start < tdll) {
/*
- * Check target descriptor identification with 0xE4 type with
- * use VPD 0x83 WWPN matching ..
+ * Check target descriptor identification with 0xE4 type, and
+ * compare the current index with the CSCD descriptor IDs in
+ * the segment descriptor. Use VPD 0x83 WWPN matching ..
*/
switch (desc[0]) {
case 0xe4:
rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
- &desc[0], src);
+ &desc[0], cscd_index);
if (rc != 0)
goto out;
- /*
- * Assume target descriptors are in source -> destination order..
- */
- if (src)
- src = false;
- else
- src = true;
start += XCOPY_TARGET_DESC_LEN;
desc += XCOPY_TARGET_DESC_LEN;
- ret++;
+ cscd_index++;
break;
default:
pr_err("XCOPY unsupported descriptor type code:"
" 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
goto out;
}
}
- if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
- else
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+ switch (xop->op_origin) {
+ case XCOL_SOURCE_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+ &xop->dst_dev);
+ break;
+ case XCOL_DEST_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+ &xop->src_dev);
+ break;
+ default:
+ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+ rc = -EINVAL;
+ break;
+ }
/*
* If a matching IEEE NAA 0x83 descriptor for the requested device
* is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -261,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->dst_dev, &xop->dst_tid_wwn[0]);
- return ret;
+ return cscd_index;
out:
return -EINVAL;
@@ -283,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->stdi = get_unaligned_be16(&desc[4]);
xop->dtdi = get_unaligned_be16(&desc[6]);
+
+ if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+ xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+ pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+ XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+ return -EINVAL;
+ }
+
pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
desc_len, xop->stdi, xop->dtdi, dc);
@@ -305,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned int sdll)
+ unsigned int sdll, sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY segment descriptor list length is not"
" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+ return -EINVAL;
+ }
+ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
return -EINVAL;
}
@@ -334,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
default:
pr_err("XCOPY unsupported segment descriptor"
"type: 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
goto out;
}
}
@@ -860,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
+ if (se_cmd->data_length == 0) {
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+ return TCM_NO_SENSE;
+ }
+ if (se_cmd->data_length < XCOPY_HDR_LEN) {
+ pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+ se_cmd->data_length, XCOPY_HDR_LEN);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
if (!xop) {
pr_err("Unable to allocate xcopy_op\n");
@@ -882,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
*/
tdll = get_unaligned_be16(&p[2]);
sdll = get_unaligned_be32(&p[8]);
+ if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+ pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+ tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
inline_dl = get_unaligned_be32(&p[12]);
if (inline_dl != 0) {
@@ -889,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
goto out;
}
+ if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+ pr_err("XCOPY parameter truncation: data length %u too small "
+ "for tdll: %hu sdll: %u inline_dl: %u\n",
+ se_cmd->data_length, tdll, sdll, inline_dl);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
+ /*
+ * skip over the target descriptors until segment descriptors
+ * have been passed - CSCD ids are needed to determine src and dest.
+ */
+ seg_desc = &p[16] + tdll;
+
+ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
+ sdll, &ret);
+ if (rc <= 0)
+ goto out;
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+
rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
@@ -910,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
- seg_desc = &p[16];
- seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
-
- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
- if (rc <= 0) {
- xcopy_pt_undepend_remotedev(xop);
- goto out;
- }
transport_kunmap_data_sg(se_cmd);
- pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
- rc * XCOPY_SEGMENT_DESC_LEN);
INIT_WORK(&xop->xop_work, target_xcopy_do_work);
queue_work(xcopy_wq, &xop->xop_work);
return TCM_NO_SENSE;
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 700a981c7b41..7c0b105cbe1b 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,8 +1,17 @@
+#include <target/target_core_base.h>
+
+#define XCOPY_HDR_LEN 16
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
#define XCOPY_MAX_SECTORS 1024
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 — CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF
+
enum xcopy_origin_list {
XCOL_SOURCE_RECV_OP = 0x01,
XCOL_DEST_RECV_OP = 0x02,
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index e28209b99b59..11d27b93b413 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -17,6 +17,9 @@
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index dfbb974927f2..dea16bb8c46a 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -127,7 +127,7 @@ static struct serial_state rs_table[1];
#define NR_PORTS ARRAY_SIZE(rs_table)
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define serial_isroot() (capable(CAP_SYS_ADMIN))
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index ce864875330e..9b5c0fb216b5 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -42,7 +42,7 @@
#include <linux/slab.h>
#include <linux/serial_core.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "hvc_console.h"
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 3c4d7c2b4ade..7823d6d998cf 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -81,7 +81,7 @@
#include <linux/tty_flip.h>
#include <asm/hvconsole.h>
#include <asm/hvcserver.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/vio.h>
/*
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 96ce6bd1cc6f..2e578d6433af 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -46,7 +46,7 @@
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#include <asm/hvsi.h>
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 60d37b225589..4caf0c3b1f99 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -47,7 +47,7 @@
#include <linux/ratelimit.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "moxa.h"
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 69294ae154be..7b8f383fb090 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -43,7 +43,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "mxser.h"
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index a7fa016f31eb..eb278832f5ce 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -103,7 +103,7 @@
#include <linux/bitops.h>
#include <asm/termios.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* Buffers for individual HDLC frames
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 345111467b85..305b6490d405 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -65,7 +65,7 @@
#include <linux/n_r3964.h>
#include <linux/poll.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*#define DEBUG_QUEUE*/
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61569a765d9e..76e03a7de9cc 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index f607946fd996..58cbb30a9401 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
.is_private = true,
- .is_nollp = true,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 4095,
.nr_masters = 1,
.data_width = {4},
+ .multi_block = {0},
};
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa0166b6d450..116436b7fa52 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
- const struct pciserial_board *board;
+ struct serial_private *new;
if (!priv)
return;
- board = priv->board;
- kfree(priv);
- priv = pciserial_init_ports(dev, board);
-
- if (!IS_ERR(priv)) {
- pci_set_drvdata(dev, priv);
+ new = pciserial_init_ports(dev, priv->board);
+ if (!IS_ERR(new)) {
+ pci_set_drvdata(dev, new);
+ kfree(priv);
}
}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe4399b41df6..c13fec451d03 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
* Enable previously disabled RX interrupts.
*/
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
- serial8250_clear_fifos(p);
+ serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 168b10cad47b..fabbe76203bb 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
+
+ /*
+ * Disable the transmitter.
+ * This is mandatory when DMA is used, otherwise the DMA buffer
+ * is fully transmitted.
+ */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+ /* re-enable the transmitter */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}
/*
@@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+ else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
-
- } else {
- if (port->rs485.flags & SER_RS485_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
- }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index c60a8d5e4020..d83783cfbade 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -53,7 +53,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "icom.h"
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d0847375ea64..9939c3d9912b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -36,7 +36,7 @@
#include <linux/mutex.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* This is used to lock changes in serial line configuration.
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 415885c56435..657eed82eeb3 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -107,7 +107,7 @@
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define RCLRVALUE 0xffff
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 8267bcf2405e..31885f20fc15 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -77,7 +77,7 @@
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index d66620f7eaa3..51e8846cd68f 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -79,7 +79,7 @@
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 52bbd27e93ae..701c085bb19b 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index bf36ac9aee41..f27fc0f14c11 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -22,7 +22,7 @@
#include <linux/compat.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#undef TTY_DEBUG_WAIT_UNTIL_SENT
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 71e81406ef71..1f6e17fc3fb0 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tty.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/console.h>
#include <linux/consolemap.h>
#include <linux/vt_kern.h>
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 368ce1803e8f..36e1b8c7680f 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 14a2b5f11bca..56dcff6059d3 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -39,7 +39,7 @@
#include <linux/slab.h>
#include <linux/notifier.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index f62c598810ff..a56edf2d58eb 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -29,7 +29,7 @@
#include <linux/timex.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 4dec9df8764b..5a59da0dc98a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -64,7 +64,7 @@
#include "usbatm.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/init.h>
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index de8e22ec3902..93e24ce61a3a 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -234,8 +234,8 @@ static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
ktime_set(timer_sec, timer_nsec));
ci->enabled_otg_timer_bits |= (1 << t);
if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) ||
- (ci->hr_timeouts[ci->next_otg_timer].tv64 >
- ci->hr_timeouts[t].tv64)) {
+ (ci->hr_timeouts[ci->next_otg_timer] >
+ ci->hr_timeouts[t])) {
ci->next_otg_timer = t;
hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
ci->hr_timeouts[t], NSEC_PER_MSEC,
@@ -269,8 +269,8 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
for_each_set_bit(cur_timer, &enabled_timer_bits,
NUM_OTG_FSM_TIMERS) {
if ((next_timer == NUM_OTG_FSM_TIMERS) ||
- (ci->hr_timeouts[next_timer].tv64 <
- ci->hr_timeouts[cur_timer].tv64))
+ (ci->hr_timeouts[next_timer] <
+ ci->hr_timeouts[cur_timer]))
next_timer = cur_timer;
}
}
@@ -397,14 +397,14 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
now = ktime_get();
for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) {
- if (now.tv64 >= ci->hr_timeouts[cur_timer].tv64) {
+ if (now >= ci->hr_timeouts[cur_timer]) {
ci->enabled_otg_timer_bits &= ~(1 << cur_timer);
if (otg_timer_handlers[cur_timer])
ret = otg_timer_handlers[cur_timer](ci);
} else {
if ((next_timer == NUM_OTG_FSM_TIMERS) ||
- (ci->hr_timeouts[cur_timer].tv64 <
- ci->hr_timeouts[next_timer].tv64))
+ (ci->hr_timeouts[cur_timer] <
+ ci->hr_timeouts[next_timer]))
next_timer = cur_timer;
}
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0aa9e7d697a5..25dbd8c7aec7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -239,6 +239,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
+ /* Check for duplicate endpoint addresses */
+ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
+ if (ifp->endpoint[i].desc.bEndpointAddress ==
+ d->bEndpointAddress) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+ }
+
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 143454ea385b..a56c75e09786 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -29,7 +29,7 @@
#include <linux/random.h>
#include <linux/pm_qos.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include "hub.h"
@@ -103,8 +103,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
-static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
- struct usb_port *port_dev);
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -903,34 +902,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
}
/*
- * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
- * a connection with a plugged-in cable but will signal the host when the cable
- * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
- */
-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
-{
- struct usb_port *port_dev = hub->ports[port1 - 1];
- struct usb_device *hdev = hub->hdev;
- int ret = 0;
-
- if (!hub->error) {
- if (hub_is_superspeed(hub->hdev)) {
- hub_usb3_port_prepare_disable(hub, port_dev);
- ret = hub_set_port_link_state(hub, port_dev->portnum,
- USB_SS_PORT_LS_U3);
- } else {
- ret = usb_clear_port_feature(hdev, port1,
- USB_PORT_FEAT_ENABLE);
- }
- }
- if (port_dev->child && set_state)
- usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
- if (ret && ret != -ENODEV)
- dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
- return ret;
-}
-
-/*
* Disable a port and mark a logical connect-change event, so that some
* time later hub_wq will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
@@ -4162,6 +4133,34 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
#endif /* CONFIG_PM */
+/*
+ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
+ * a connection with a plugged-in cable but will signal the host when the cable
+ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
+ */
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+{
+ struct usb_port *port_dev = hub->ports[port1 - 1];
+ struct usb_device *hdev = hub->hdev;
+ int ret = 0;
+
+ if (!hub->error) {
+ if (hub_is_superspeed(hub->hdev)) {
+ hub_usb3_port_prepare_disable(hub, port_dev);
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U3);
+ } else {
+ ret = usb_clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ }
+ }
+ if (port_dev->child && set_state)
+ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ if (ret && ret != -ENODEV)
+ dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
+ return ret;
+}
+
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b95930f20d90..c55db4aa54d6 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3753,7 +3753,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
- &hs_ep->desc_list_dma, GFP_KERNEL);
+ &hs_ep->desc_list_dma, GFP_ATOMIC);
if (!hs_ep->desc_list) {
ret = -ENOMEM;
goto error2;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a786256535b6..11fe68a4627b 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -247,8 +247,6 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
char *property, u8 size, u64 *value)
{
- u8 val8;
- u16 val16;
u32 val32;
switch (size) {
@@ -256,17 +254,7 @@ static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
*value = device_property_read_bool(hsotg->dev, property);
break;
case 1:
- if (device_property_read_u8(hsotg->dev, property, &val8))
- return;
-
- *value = val8;
- break;
case 2:
- if (device_property_read_u16(hsotg->dev, property, &val16))
- return;
-
- *value = val16;
- break;
case 4:
if (device_property_read_u32(hsotg->dev, property, &val32))
return;
@@ -1100,13 +1088,13 @@ static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
/* Buffer DMA */
dwc2_set_param_bool(hsotg, &p->g_dma,
false, "gadget-dma",
- true, false,
+ dma_capable, false,
dma_capable);
/* DMA Descriptor */
dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
"gadget-dma-desc",
- p->g_dma, false,
+ !!hw->dma_desc_enable, false,
!!hw->dma_desc_enable);
}
@@ -1130,8 +1118,14 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
dwc2_set_param_bool(hsotg, &p->host_dma,
false, "host-dma",
- true, false,
+ dma_capable, false,
dma_capable);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
}
dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
@@ -1140,12 +1134,6 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
params->host_support_fs_ls_low_power);
dwc2_set_param_enable_dynamic_fifo(hsotg,
params->enable_dynamic_fifo);
- dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
dwc2_set_param_max_transfer_size(hsotg,
params->max_transfer_size);
dwc2_set_param_max_packet_count(hsotg,
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index de5a8570be04..14b760209680 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -45,9 +45,7 @@
#define DWC3_XHCI_RESOURCES_NUM 2
#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
-#define DWC3_EVENT_SIZE 4 /* bytes */
-#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
-#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
+#define DWC3_EVENT_BUFFERS_SIZE 4096
#define DWC3_EVENT_TYPE_MASK 0xfe
#define DWC3_EVENT_TYPE_DEV 0
@@ -311,9 +309,8 @@
#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DCFG_SUPERSPEED (4 << 0)
#define DWC3_DCFG_HIGHSPEED (0 << 0)
-#define DWC3_DCFG_FULLSPEED2 (1 << 0)
+#define DWC3_DCFG_FULLSPEED (1 << 0)
#define DWC3_DCFG_LOWSPEED (2 << 0)
-#define DWC3_DCFG_FULLSPEED1 (3 << 0)
#define DWC3_DCFG_NUMP_SHIFT 17
#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
@@ -405,9 +402,8 @@
#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DSTS_SUPERSPEED (4 << 0)
#define DWC3_DSTS_HIGHSPEED (0 << 0)
-#define DWC3_DSTS_FULLSPEED2 (1 << 0)
+#define DWC3_DSTS_FULLSPEED (1 << 0)
#define DWC3_DSTS_LOWSPEED (2 << 0)
-#define DWC3_DSTS_FULLSPEED1 (3 << 0)
/* Device Generic Command Register */
#define DWC3_DGCMD_SET_LMP 0x01
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 29e80cc9b634..eb1b9cb3f9d1 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dwc3-omap.h>
@@ -510,7 +511,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
-
+ irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
dwc3_omap_interrupt_thread, IRQF_SHARED,
"dwc3-omap", omap);
@@ -531,7 +532,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
}
dwc3_omap_enable_irqs(omap);
-
+ enable_irq(omap->irq);
return 0;
err2:
@@ -552,6 +553,7 @@ static int dwc3_omap_remove(struct platform_device *pdev)
extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
dwc3_omap_disable_irqs(omap);
+ disable_irq(omap->irq);
of_platform_depopulate(omap->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2b73339f286b..cce0a220b6b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -38,6 +38,7 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -73,16 +74,6 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
struct platform_device *dwc3 = dwc->dwc3;
struct pci_dev *pdev = dwc->pci;
- int ret;
-
- struct property_entry sysdev_property[] = {
- PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
- { },
- };
-
- ret = platform_device_add_properties(dwc3, sysdev_property);
- if (ret)
- return ret;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
@@ -105,6 +96,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -115,7 +107,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
int ret;
struct property_entry properties[] = {
- PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ }
};
@@ -167,6 +160,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -274,6 +268,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 4878d187c7d4..9bb1f8526f3e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,18 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
- u32 len, u32 type, bool chain)
+static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
+ dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
- struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
struct dwc3_ep *dep;
- int ret;
-
dep = dwc->eps[epnum];
- if (dep->flags & DWC3_EP_BUSY)
- return 0;
trb = &dwc->ep0_trb[dep->trb_enqueue];
@@ -71,15 +66,23 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trb->ctrl |= (DWC3_TRB_CTRL_IOC
| DWC3_TRB_CTRL_LST);
- if (chain)
+ trace_dwc3_prepare_trb(dep, trb);
+}
+
+static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
+ int ret;
+
+ dep = dwc->eps[epnum];
+ if (dep->flags & DWC3_EP_BUSY)
return 0;
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
- trace_dwc3_prepare_trb(dep, trb);
-
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
if (ret < 0)
return ret;
@@ -280,8 +283,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
complete(&dwc->ep0_in_setup);
- ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
+ dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
+ ret = dwc3_ep0_start_trans(dwc, 0);
WARN_ON(ret < 0);
}
@@ -912,9 +916,9 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
dwc->ep0_next_event = DWC3_EP0_COMPLETE;
- ret = dwc3_ep0_start_trans(dwc, epnum,
- dwc->ctrl_req_addr, 0,
- DWC3_TRBCTL_CONTROL_DATA, false);
+ dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
+ 0, DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, epnum);
WARN_ON(ret < 0);
}
}
@@ -993,9 +997,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->direction = !!dep->number;
if (req->request.length == 0) {
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ctrl_req_addr, 0,
DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
&& (dep->number == 0)) {
u32 transfer_size = 0;
@@ -1011,7 +1016,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
transfer_size = ALIGN(req->request.length - maxpacket,
maxpacket);
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
req->request.dma,
transfer_size,
DWC3_TRBCTL_CONTROL_DATA,
@@ -1023,18 +1028,20 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
dwc->ep0_bounced = true;
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ep0_bounce_addr, transfer_size,
DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
} else {
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
if (ret)
return;
- ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
}
WARN_ON(ret < 0);
@@ -1048,8 +1055,9 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- return dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ctrl_req_addr, 0, type, false);
+ return dwc3_ep0_start_trans(dwc, dep->number);
}
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 678559525618..204c754cc647 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -180,11 +180,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number == 0)
+ if (dwc->ep0_bounced && dep->number <= 1)
dwc->ep0_bounced = false;
- else
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -1720,7 +1720,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
reg |= DWC3_DCFG_LOWSPEED;
break;
case USB_SPEED_FULL:
- reg |= DWC3_DCFG_FULLSPEED1;
+ reg |= DWC3_DCFG_FULLSPEED;
break;
case USB_SPEED_HIGH:
reg |= DWC3_DCFG_HIGHSPEED;
@@ -2232,9 +2232,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum];
- if (!(dep->flags & DWC3_EP_ENABLED) &&
- !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
- return;
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return;
+
+ /* Handle only EPCMDCMPLT when EP disabled */
+ if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+ return;
+ }
if (epnum == 0 || epnum == 1) {
dwc3_ep0_interrupt(dwc, event);
@@ -2531,8 +2536,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
break;
- case DWC3_DSTS_FULLSPEED2:
- case DWC3_DSTS_FULLSPEED1:
+ case DWC3_DSTS_FULLSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
@@ -2566,7 +2570,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
*/
WARN_ONCE(dwc->revision < DWC3_REVISION_240A
&& dwc->has_lpm_erratum,
- "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 41ab61f9b6e0..002822d98fda 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1694,9 +1694,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
value = min(w_length, (u16) 1);
break;
- /* function drivers must handle get/set altsetting; if there's
- * no get() method, we know only altsetting zero works.
- */
+ /* function drivers must handle get/set altsetting */
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
@@ -1705,7 +1703,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
f = cdev->config->interface[intf];
if (!f)
break;
- if (w_value && !f->set_alt)
+
+ /*
+ * If there's no get_alt() method, we know only altsetting zero
+ * works. There is no need to check if set_alt() is not NULL
+ * as we check this in usb_add_function().
+ */
+ if (w_value && !f->get_alt)
break;
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0780d8311ec6..5e746adc8a2d 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
goto error_mutex;
}
if (!io_data->read &&
- copy_from_iter(data, data_len, &io_data->data) != data_len) {
+ !copy_from_iter_full(data, data_len, &io_data->data)) {
ret = -EFAULT;
goto error_mutex;
}
@@ -2091,8 +2091,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
case FFS_STRING:
/*
- * Strings are indexed from 1 (0 is magic ;) reserved
- * for languages list or some such)
+ * Strings are indexed from 1 (0 is reserved
+ * for languages list)
*/
if (*valuep > helper->ffs->strings_count)
helper->ffs->strings_count = *valuep;
@@ -2252,7 +2252,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- !d->Reserved1)
+ d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
@@ -3666,6 +3666,7 @@ static void ffs_closed(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
struct f_fs_opts *opts;
+ struct config_item *ci;
ENTER();
ffs_dev_lock();
@@ -3689,8 +3690,11 @@ static void ffs_closed(struct ffs_data *ffs)
|| !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
goto done;
- unregister_gadget_item(ffs_obj->opts->
- func_inst.group.cg_item.ci_parent->ci_parent);
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ ffs_dev_unlock();
+
+ unregister_gadget_item(ci);
+ return;
done:
ffs_dev_unlock();
}
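The f_fs.c hunk above (and the gadgetfs hunk later on) replaces the "copy_from_iter() returned fewer bytes than asked" check with copy_from_iter_full(), which either copies everything or reverts the iterator. A minimal stand-alone sketch of that pattern; copy_payload() and its arguments are hypothetical and not part of the patch:

#include <linux/slab.h>
#include <linux/uio.h>

static int copy_payload(struct iov_iter *iter, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* true only if all 'len' bytes were copied; on failure the iterator is reverted */
	if (!copy_from_iter_full(buf, len, iter)) {
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return 0;
}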
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 7abd70b2a588..5f8139b8e601 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -593,7 +593,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
- ERROR(cdev, "Enable IN endpoint FAILED!\n");
+ ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto fail;
}
hidg->out_ep->driver_data = hidg;
@@ -905,7 +905,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
mutex_lock(&hidg_ida_lock);
hidg_put_minor(opts->minor);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
mutex_unlock(&hidg_ida_lock);
@@ -931,7 +931,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
mutex_lock(&hidg_ida_lock);
- if (idr_is_empty(&hidg_ida.idr)) {
+ if (ida_is_empty(&hidg_ida)) {
status = ghid_setup(NULL, HIDG_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -944,7 +944,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&hidg_ida.idr))
+ if (ida_is_empty(&hidg_ida))
ghid_cleanup();
goto unlock;
}
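The f_hid.c hunks above (and the f_printer.c ones below) stop peeking at the IDA's embedded idr and use ida_is_empty() instead. A short sketch of the same check, assuming a hypothetical example_ida and cleanup message:

#include <linux/idr.h>
#include <linux/printk.h>

static DEFINE_IDA(example_ida);

static void example_put_minor(int minor)
{
	ida_simple_remove(&example_ida, minor);

	/* tear shared state down once the last minor is gone */
	if (ida_is_empty(&example_ida))
		pr_info("no minors allocated, cleaning up\n");
}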
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e8008fa35e1e..224717e63a53 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1113,8 +1113,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
}
/* Delay the timer. */
- hrtimer_start(&ncm->task_timer,
- ktime_set(0, TX_TIMEOUT_NSECS),
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
HRTIMER_MODE_REL);
/* Add the datagram position entries */
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 0de36cda6e41..8054da9276dd 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1265,7 +1265,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
mutex_lock(&printer_ida_lock);
gprinter_put_minor(opts->minor);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
mutex_unlock(&printer_ida_lock);
@@ -1289,7 +1289,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
mutex_lock(&printer_ida_lock);
- if (idr_is_empty(&printer_ida.idr)) {
+ if (ida_is_empty(&printer_ida)) {
status = gprinter_setup(PRINTER_MINORS);
if (status) {
ret = ERR_PTR(status);
@@ -1302,7 +1302,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
- if (idr_is_empty(&printer_ida.idr))
+ if (ida_is_empty(&printer_ida))
gprinter_cleanup();
goto unlock;
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 197f73386fac..d2351139342f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
struct usbg_cmd *cmd;
int tag;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index bd82dd12deff..6bde4396927c 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -20,7 +20,7 @@
#include <linux/uts.h>
#include <linux/wait.h>
#include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
@@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
return -ENOMEM;
}
- if (unlikely(copy_from_iter(buf, len, from) != len)) {
+ if (unlikely(!copy_from_iter_full(buf, len, from))) {
value = -EFAULT;
goto out;
}
@@ -1126,7 +1126,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* data and/or status stage for control request */
} else if (dev->state == STATE_DEV_SETUP) {
- /* IN DATA+STATUS caller makes len <= wLength */
+ len = min_t(size_t, len, dev->setup_wLength);
if (dev->setup_in) {
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
@@ -1734,10 +1734,12 @@ static struct usb_gadget_driver gadgetfs_driver = {
* such as configuration notifications.
*/
-static int is_valid_config (struct usb_config_descriptor *config)
+static int is_valid_config(struct usb_config_descriptor *config,
+ unsigned int total)
{
return config->bDescriptorType == USB_DT_CONFIG
&& config->bLength == USB_DT_CONFIG_SIZE
+ && total >= USB_DT_CONFIG_SIZE
&& config->bConfigurationValue != 0
&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
@@ -1762,7 +1764,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
}
spin_unlock_irq(&dev->lock);
- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
+ (len > PAGE_SIZE * 4))
return -EINVAL;
/* we might need to change message format someday */
@@ -1786,7 +1789,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* full or low speed config */
dev->config = (void *) kbuf;
total = le16_to_cpu(dev->config->wTotalLength);
- if (!is_valid_config (dev->config) || total >= length)
+ if (!is_valid_config(dev->config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
@@ -1795,10 +1799,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
if (kbuf [1] == USB_DT_CONFIG) {
dev->hs_config = (void *) kbuf;
total = le16_to_cpu(dev->hs_config->wTotalLength);
- if (!is_valid_config (dev->hs_config) || total >= length)
+ if (!is_valid_config(dev->hs_config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
+ } else {
+ dev->hs_config = NULL;
}
/* could support multiple configs, using another encoding! */
@@ -1811,7 +1818,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|| dev->dev->bDescriptorType != USB_DT_DEVICE
|| dev->dev->bNumConfigurations != 1)
goto fail;
- dev->dev->bNumConfigurations = 1;
dev->dev->bcdUSB = cpu_to_le16 (0x0200);
/* triggers gadgetfs_bind(); then we can enumerate. */
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9483489080f6..0402177f93cd 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1317,7 +1317,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
if (!ret)
break;
}
- if (!ret && !udc->driver)
+ if (ret)
+ ret = -ENODEV;
+ else if (udc->driver)
+ ret = -EBUSY;
+ else
goto found;
} else {
list_for_each_entry(udc, &udc_list, list) {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 02b14e91ae6c..c60abe3a68f9 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -330,7 +330,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
/* caller must hold lock */
static void stop_activity(struct dummy *dum)
{
- struct dummy_ep *ep;
+ int i;
/* prevent any more requests */
dum->address = 0;
@@ -338,8 +338,8 @@ static void stop_activity(struct dummy *dum)
/* The timer is left running so that outstanding URBs can fail */
/* nuke any pending requests first, so driver i/o is quiesced */
- list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
- nuke(dum, ep);
+ for (i = 0; i < DUMMY_ENDPOINTS; ++i)
+ nuke(dum, &dum->ep[i]);
/* driver now does any non-usb quiescing necessary */
}
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 69f50e6533a6..3893b5bafd87 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -88,8 +88,7 @@ static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
ktime_t *timeout = &ehci->hr_timeouts[event];
if (resched)
- *timeout = ktime_add(ktime_get(),
- ktime_set(0, event_delays_ns[event]));
+ *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
ehci->enabled_hrtimer_events |= (1 << event);
/* Track only the lowest-numbered pending event */
@@ -425,7 +424,7 @@ static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
*/
now = ktime_get();
for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
- if (now.tv64 >= ehci->hr_timeouts[e].tv64)
+ if (now >= ehci->hr_timeouts[e])
event_handlers[e](ehci);
else
ehci_enable_event(ehci, e, false);
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 66efa9a67687..9d0b0518290a 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1080,8 +1080,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
ktime_t *timeout = &fotg210->hr_timeouts[event];
if (resched)
- *timeout = ktime_add(ktime_get(),
- ktime_set(0, event_delays_ns[event]));
+ *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
fotg210->enabled_hrtimer_events |= (1 << event);
/* Track only the lowest-numbered pending event */
@@ -1381,7 +1380,7 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
*/
now = ktime_get();
for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
- if (now.tv64 >= fotg210->hr_timeouts[e].tv64)
+ if (now >= fotg210->hr_timeouts[e])
event_handlers[e](fotg210);
else
fotg210_enable_event(fotg210, e, false);
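The ehci-timer.c and fotg210-hcd.c hunks rely on ktime_t now being a plain s64 count of nanoseconds, so timeouts can be built and compared directly without ktime_set(0, ns) or the old .tv64 member. A minimal sketch of that arithmetic; deadline_after()/deadline_expired() and delay_ns are hypothetical:

#include <linux/ktime.h>

static ktime_t deadline_after(u64 delay_ns)
{
	return ktime_add(ktime_get(), delay_ns);
}

static bool deadline_expired(ktime_t deadline)
{
	return ktime_get() >= deadline;
}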
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index be9e63836881..414e3c376dbb 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -43,7 +43,6 @@ struct at91_usbh_data {
struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
u8 ports; /* number of ports on root hub */
u8 overcurrent_supported;
- u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
u8 overcurrent_status[AT91_MAX_USBH_PORTS];
u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
};
@@ -266,8 +265,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (!valid_port(port))
return;
- gpiod_set_value(pdata->vbus_pin[port],
- pdata->vbus_pin_active_low[port] ^ enable);
+ gpiod_set_value(pdata->vbus_pin[port], enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -275,8 +273,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (!valid_port(port))
return -EINVAL;
- return gpiod_get_value(pdata->vbus_pin[port]) ^
- pdata->vbus_pin_active_low[port];
+ return gpiod_get_value(pdata->vbus_pin[port]);
}
/*
@@ -533,18 +530,17 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->ports = ports;
at91_for_each_port(i) {
- pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
- "atmel,vbus-gpio",
- GPIOD_IN);
+ if (i >= pdata->ports)
+ break;
+
+ pdata->vbus_pin[i] =
+ devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
+ i, GPIOD_OUT_HIGH);
if (IS_ERR(pdata->vbus_pin[i])) {
err = PTR_ERR(pdata->vbus_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
continue;
}
-
- pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
-
- ohci_at91_usb_set_power(pdata, i, 1);
}
at91_for_each_port(i) {
@@ -552,8 +548,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
break;
pdata->overcurrent_pin[i] =
- devm_gpiod_get_optional(&pdev->dev,
- "atmel,oc-gpio", GPIOD_IN);
+ devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
+ i, GPIOD_IN);
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 5d3d914ab4fb..683098afa93e 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -42,7 +42,7 @@
#include <linux/bitops.h>
#include <linux/dmi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 940304c33224..02260cfdedb1 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
uhci->wait_for_hp = 1;
+ /* Intel controllers use non-PME wakeup signalling */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+ device_set_run_wake(uhci_dev(uhci), 1);
+
/* Set up pointers to PCI-specific functions */
uhci->reset_hc = uhci_pci_reset_hc;
uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 321de2e0161b..8414ed2a02de 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -979,6 +979,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
xhci->devs[slot_id] = NULL;
}
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the child first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *vdev;
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ int i;
+
+ vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
+
+ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* is this a hub device that added a tt_info to the tts list */
+ if (tt_info->slot_id == slot_id) {
+ /* are any devices using this tt_info? */
+ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ vdev = xhci->devs[i];
+ if (vdev && (vdev->tt_info == tt_info))
+ xhci_free_virt_devices_depth_first(
+ xhci, i);
+ }
+ }
+ }
+ /* we are now at a leaf device */
+ xhci_free_virt_device(xhci, slot_id);
+}
+
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
@@ -1795,7 +1829,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ cancel_delayed_work_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1828,8 +1862,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
- for (i = 1; i < MAX_HC_SLOTS; ++i)
- xhci_free_virt_device(xhci, i);
+ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ xhci_free_virt_devices_depth_first(xhci, i);
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
@@ -2342,9 +2376,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cmd_list);
- /* init command timeout timer */
- setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
- (unsigned long)xhci);
+ /* init command timeout work */
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ init_completion(&xhci->cmd_ring_stop_completion);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 1094ebd2838f..bac961cd24ad 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -579,8 +579,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto disable_ldos;
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto disable_clk;
+ }
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e96ae80d107e..954abfd5014d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -165,7 +165,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bdf6b13d9b67..e32029a31ca4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -279,23 +279,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
readl(&xhci->dba->doorbell[0]);
}
-static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+ return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
+
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+{
+ return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
+ cmd_list);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ struct xhci_command *cur_cmd)
+{
+ struct xhci_command *i_cmd;
+ u32 cycle_state;
+
+ /* Turn all aborted commands in list to no-ops, then restart */
+ list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
+
+ if (i_cmd->status != COMP_CMD_ABORT)
+ continue;
+
+ i_cmd->status = COMP_CMD_STOP;
+
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ i_cmd->command_trb);
+ /* get cycle state from the original cmd trb */
+ cycle_state = le32_to_cpu(
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+ /* modify the command trb to no-op command */
+ i_cmd->command_trb->generic.field[0] = 0;
+ i_cmd->command_trb->generic.field[1] = 0;
+ i_cmd->command_trb->generic.field[2] = 0;
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+ /*
+ * caller waiting for completion is called when command
+ * completion event is received for these no-op commands
+ */
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ /* ring command ring doorbell to restart the command ring */
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_ring_cmd_db(xhci);
+ }
+}
+
+/* Must be called with xhci->lock held, releases and acquires lock back */
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
u64 temp_64;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
- temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+ reinit_completion(&xhci->cmd_ring_stop_completion);
- /*
- * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
- * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
- * but the completion event in never sent. Use the cmd timeout timer to
- * handle those cases. Use twice the time to cover the bit polling retry
- */
- mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
@@ -315,17 +368,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
udelay(1000);
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
- if (ret == 0)
- return 0;
-
- xhci_err(xhci, "Stopped the command ring failed, "
- "maybe the host is dead\n");
- del_timer(&xhci->cmd_timer);
- xhci->xhc_state |= XHCI_STATE_DYING;
- xhci_halt(xhci);
- return -ESHUTDOWN;
+ if (ret < 0) {
+ xhci_err(xhci, "Stopped the command ring failed, "
+ "maybe the host is dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_halt(xhci);
+ return -ESHUTDOWN;
+ }
+ }
+ /*
+ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+ * but the completion event is never sent. Wait 2 secs (arbitrary
+ * number) to handle those cases after negation of CMD_RING_RUNNING.
+ */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
+ msecs_to_jiffies(2000));
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!ret) {
+ xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
+ xhci_cleanup_command_queue(xhci);
+ } else {
+ xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
}
-
return 0;
}
@@ -847,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
- if (xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but no command pending, "
@@ -1207,101 +1262,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
-/*
- * Turn all commands on command ring with status set to "aborted" to no-op trbs.
- * If there are other commands waiting then restart the ring and kick the timer.
- * This must be called with command ring stopped and xhci->lock held.
- */
-static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
- struct xhci_command *cur_cmd)
-{
- struct xhci_command *i_cmd, *tmp_cmd;
- u32 cycle_state;
-
- /* Turn all aborted commands in list to no-ops, then restart */
- list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
- cmd_list) {
-
- if (i_cmd->status != COMP_CMD_ABORT)
- continue;
-
- i_cmd->status = COMP_CMD_STOP;
-
- xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
- i_cmd->command_trb);
- /* get cycle state from the original cmd trb */
- cycle_state = le32_to_cpu(
- i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
- /* modify the command trb to no-op command */
- i_cmd->command_trb->generic.field[0] = 0;
- i_cmd->command_trb->generic.field[1] = 0;
- i_cmd->command_trb->generic.field[2] = 0;
- i_cmd->command_trb->generic.field[3] = cpu_to_le32(
- TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-
- /*
- * caller waiting for completion is called when command
- * completion event is received for these no-op commands
- */
- }
-
- xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-
- /* ring command ring doorbell to restart the command ring */
- if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
- !(xhci->xhc_state & XHCI_STATE_DYING)) {
- xhci->current_cmd = cur_cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
- xhci_ring_cmd_db(xhci);
- }
- return;
-}
-
-
-void xhci_handle_command_timeout(unsigned long data)
+void xhci_handle_command_timeout(struct work_struct *work)
{
struct xhci_hcd *xhci;
int ret;
unsigned long flags;
u64 hw_ring_state;
- bool second_timeout = false;
- xhci = (struct xhci_hcd *) data;
- /* mark this command to be cancelled */
+ xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
+
spin_lock_irqsave(&xhci->lock, flags);
- if (xhci->current_cmd) {
- if (xhci->current_cmd->status == COMP_CMD_ABORT)
- second_timeout = true;
- xhci->current_cmd->status = COMP_CMD_ABORT;
+
+ /*
+ * If timeout work is pending, or current_cmd is NULL, it means we
+ * raced with command completion. Command is handled so just return.
+ */
+ if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
}
+ /* mark this command to be cancelled */
+ xhci->current_cmd->status = COMP_CMD_ABORT;
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
+ /* Prevent new doorbell, and start command abort */
+ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_dbg(xhci, "Command timeout\n");
- ret = xhci_abort_cmd_ring(xhci);
+ ret = xhci_abort_cmd_ring(xhci, flags);
if (unlikely(ret == -ESHUTDOWN)) {
xhci_err(xhci, "Abort command ring failed\n");
xhci_cleanup_command_queue(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
+
+ return;
}
- return;
+
+ goto time_out_completed;
}
- /* command ring failed to restart, or host removed. Bail out */
- if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+ /* host removed. Bail out */
+ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+ xhci_dbg(xhci, "host removed, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
- return;
+
+ goto time_out_completed;
}
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+
+time_out_completed:
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -1333,7 +1349,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- del_timer(&xhci->cmd_timer);
+ cancel_delayed_work(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1341,7 +1357,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
if (cmd_comp_code == COMP_CMD_STOP) {
- xhci_handle_stopped_cmd_ring(xhci, cmd);
+ complete_all(&xhci->cmd_ring_stop_completion);
return;
}
@@ -1359,8 +1375,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
*/
if (cmd_comp_code == COMP_CMD_ABORT) {
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- if (cmd->status == COMP_CMD_ABORT)
+ if (cmd->status == COMP_CMD_ABORT) {
+ if (xhci->current_cmd == cmd)
+ xhci->current_cmd = NULL;
goto event_handled;
+ }
}
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1421,7 +1440,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
if (cmd->cmd_list.next != &xhci->cmd_list) {
xhci->current_cmd = list_entry(cmd->cmd_list.next,
struct xhci_command, cmd_list);
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ } else if (xhci->current_cmd == cmd) {
+ xhci->current_cmd = NULL;
}
event_handled:
@@ -1939,8 +1960,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
u32 remaining, requested;
- bool on_data_stage;
+ u32 trb_type;
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -1950,14 +1972,11 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
requested = td->urb->transfer_buffer_length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- /* not setup (dequeue), or status stage means we are at data stage */
- on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
-
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (ep_trb != td->last_trb) {
+ if (trb_type != TRB_STATUS) {
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
- on_data_stage ? "data" : "setup");
+ (trb_type == TRB_DATA) ? "data" : "setup");
*status = -ESHUTDOWN;
break;
}
@@ -1967,15 +1986,25 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
*status = 0;
break;
case COMP_STOP_SHORT:
- if (on_data_stage)
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = remaining;
else
xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
goto finish_td;
case COMP_STOP:
- if (on_data_stage)
+ switch (trb_type) {
+ case TRB_SETUP:
+ td->urb->actual_length = 0;
+ goto finish_td;
+ case TRB_DATA:
+ case TRB_NORMAL:
td->urb->actual_length = requested - remaining;
- goto finish_td;
+ goto finish_td;
+ default:
+ xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
+ trb_type);
+ goto finish_td;
+ }
case COMP_STOP_INVAL:
goto finish_td;
default:
@@ -1987,7 +2016,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
- if (on_data_stage)
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = requested - remaining;
else if (!td->urb_length_set)
td->urb->actual_length = 0;
@@ -1995,14 +2024,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
/* stopped at setup stage, no data transferred */
- if (ep_trb == ep_ring->dequeue)
+ if (trb_type == TRB_SETUP)
goto finish_td;
/*
* if on data stage then update the actual_length of the URB and flag it
* as set, so it won't be overwritten in the event for the last TRB.
*/
- if (on_data_stage) {
+ if (trb_type == TRB_DATA ||
+ trb_type == TRB_NORMAL) {
td->urb_length_set = true;
td->urb->actual_length = requested - remaining;
xhci_dbg(xhci, "Waiting for status stage event\n");
@@ -3790,9 +3820,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
/* if there are no other commands queued we start the timeout timer */
if (xhci->cmd_list.next == &cmd->cmd_list &&
- !timer_pending(&xhci->cmd_timer)) {
+ !delayed_work_pending(&xhci->cmd_timer)) {
xhci->current_cmd = cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1cd56417cbec..9a0ec116654a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(urb_priv);
return ret;
}
- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.",
- urb->ep->desc.bEndpointAddress, urb);
- /* Let the stop endpoint command watchdog timer (which set this
- * state) finish cleaning up the endpoint TD lists. We must
- * have caught it in the middle of dropping a lock and giving
- * back an URB.
- */
- goto done;
- }
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
@@ -3787,8 +3774,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying, removing or halted */
+ if (xhci->xhc_state) { /* dying, removing or halted */
+ ret = -ESHUTDOWN;
goto out;
+ }
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8ccc11a974b8..2d7b6374b58d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1568,7 +1568,8 @@ struct xhci_hcd {
#define CMD_RING_STATE_STOPPED (1 << 2)
struct list_head cmd_list;
unsigned int cmd_ring_reserved_trbs;
- struct timer_list cmd_timer;
+ struct delayed_work cmd_timer;
+ struct completion cmd_ring_stop_completion;
struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
@@ -1934,7 +1935,7 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-void xhci_handle_command_timeout(unsigned long data);
+void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
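The xhci hunks above convert the command timeout from a timer_list to a delayed_work plus a stop completion. A self-contained sketch of the delayed-work half of that conversion (INIT_DELAYED_WORK, mod_delayed_work() on system_wq, cancel_delayed_work_sync()); my_ctrl, MY_TIMEOUT and the handler body are hypothetical, not the xhci code itself:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_ctrl {
	struct delayed_work cmd_timer;
};

#define MY_TIMEOUT	(5 * HZ)

static void my_cmd_timeout(struct work_struct *work)
{
	struct my_ctrl *ctrl = container_of(to_delayed_work(work),
					    struct my_ctrl, cmd_timer);

	/* handle the timed-out command here */
	(void)ctrl;
}

static void my_ctrl_init(struct my_ctrl *ctrl)
{
	INIT_DELAYED_WORK(&ctrl->cmd_timer, my_cmd_timeout);
}

static void my_ctrl_arm_timeout(struct my_ctrl *ctrl)
{
	/* (re)arm the timeout; returns true if the work was already pending */
	mod_delayed_work(system_wq, &ctrl->cmd_timer, MY_TIMEOUT);
}

static void my_ctrl_teardown(struct my_ctrl *ctrl)
{
	cancel_delayed_work_sync(&ctrl->cmd_timer);
}

One advantage of the work-based version, as the hunks show, is that the timeout handler may drop xhci->lock and sleep while waiting for the stop completion, which a timer callback could not do.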
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9a82f8308ad7..01a9373b7e18 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -48,7 +48,7 @@
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 2975e80b7a56..debc1fd74b0d 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
/* image constants */
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 9ca595632f17..3bc5356832db 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/poll.h>
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c8fbe7b739a0..b10e26c74a90 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -83,7 +83,7 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/poll.h>
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 1a874a1f3890..91c22276c03b 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/time64.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "usb_mon.h"
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 5388a339cfb8..5bdf73a57498 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -12,7 +12,7 @@
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "usb_mon.h"
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index e59334b09c41..db1a4abf2806 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -14,7 +14,7 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "usb_mon.h"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 310238c6b5cd..896798071817 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -469,6 +469,7 @@ static const struct musb_platform_ops bfin_ops = {
.init = bfin_musb_init,
.exit = bfin_musb_exit,
+ .fifo_offset = bfin_fifo_offset,
.readb = bfin_readb,
.writeb = bfin_writeb,
.readw = bfin_readw,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 9e226468a13e..fca288bbc800 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2050,6 +2050,7 @@ struct musb_pending_work {
struct list_head node;
};
+#ifdef CONFIG_PM
/*
* Called from musb_runtime_resume(), musb_resume(), and
* musb_queue_resume_work(). Callers must take musb->lock.
@@ -2077,6 +2078,7 @@ static int musb_run_resume_work(struct musb *musb)
return error;
}
+#endif
/*
* Called to run work if device is active or else queue the work to happen
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index a611e2f67bdc..ade902ea1221 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -216,6 +216,7 @@ struct musb_platform_ops {
void (*pre_root_reset_end)(struct musb *musb);
void (*post_root_reset_end)(struct musb *musb);
int (*phy_callback)(enum musb_vbus_id_status status);
+ void (*clear_ep_rxintr)(struct musb *musb, int epnum);
};
/*
@@ -626,6 +627,12 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
musb->ops->post_root_reset_end(musb);
}
+static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ if (musb->ops->clear_ep_rxintr)
+ musb->ops->clear_ep_rxintr(musb, epnum);
+}
+
/*
* gets the "dr_mode" property from DT and converts it into musb_mode
* if the property is not found or not recognized returns MUSB_OTG
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d4d7c56b48c7..16363852c034 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -197,8 +197,7 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
if (!list_empty(&controller->early_tx_list) &&
!hrtimer_is_queued(&controller->early_tx)) {
ret = HRTIMER_RESTART;
- hrtimer_forward_now(&controller->early_tx,
- ktime_set(0, 20 * NSEC_PER_USEC));
+ hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
}
spin_unlock_irqrestore(&musb->lock, flags);
@@ -280,9 +279,9 @@ static void cppi41_dma_callback(void *private_data)
unsigned long usecs = cppi41_channel->total_len / 10;
hrtimer_start_range_ns(&controller->early_tx,
- ktime_set(0, usecs * NSEC_PER_USEC),
- 20 * NSEC_PER_USEC,
- HRTIMER_MODE_REL);
+ usecs * NSEC_PER_USEC,
+ 20 * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
}
out:
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 9b22d946c089..dd70c88419d2 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -37,7 +37,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "musb_core.h"
#include "musb_debug.h"
@@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
unsigned i;
seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+ pm_runtime_get_sync(musb->controller);
for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
switch (musb_regmap[i].size) {
@@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
@@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
struct musb *musb = s->private;
unsigned test;
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
if (test & MUSB_TEST_FORCE_HOST)
seq_printf(s, "force host\n");
@@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[18];
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
dev_err(musb->controller, "Error: test mode is already set. "
"Please do USB Bus Reset to start a new test.\n");
- return count;
+ goto ret;
}
memset(buf, 0x00, sizeof(buf));
@@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+ret:
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
@@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_WAIT_BCON:
+ pm_runtime_get_sync(musb->controller);
+
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
break;
default:
connect = -1;
@@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
+ pm_runtime_get_sync(musb->controller);
if (!strncmp(buf, "0", 1)) {
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
@@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index feae1561b9ab..9f125e179acd 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -267,6 +267,17 @@ static void otg_timer(unsigned long _musb)
pm_runtime_put_autosuspend(dev);
}
+void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ u32 epintr;
+ struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+ /* musb->lock might already be held */
+ epintr = (1 << epnum) << wrp->rxep_shift;
+ musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
+}
+
static irqreturn_t dsps_interrupt(int irq, void *hci)
{
struct musb *musb = hci;
@@ -622,6 +633,7 @@ static struct musb_platform_ops dsps_ops = {
.set_mode = dsps_musb_set_mode,
.recover = dsps_musb_recover,
+ .clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
};
static u64 musb_dmamask = DMA_BIT_MASK(32);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index f6cdbad00dac..ac3a4952abb4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2374,12 +2374,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
int is_in = usb_pipein(urb->pipe);
int status = 0;
u16 csr;
+ struct dma_channel *dma = NULL;
musb_ep_select(regs, hw_end);
if (is_dma_capable()) {
- struct dma_channel *dma;
-
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
@@ -2395,10 +2394,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
- /* REVISIT we still get an irq; should likely clear the
- * endpoint's irq status here to avoid bogus irqs.
- * clearing that status is platform-specific...
- */
+ /* clear the endpoint's irq status here to avoid bogus irqs */
+ if (is_dma_capable() && dma)
+ musb_platform_clear_ep_rxintr(musb, ep->epnum);
} else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f7b13fd25257..a3dcbd55e436 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -157,5 +157,5 @@ struct musb_dma_controller {
void __iomem *base;
u8 channel_count;
u8 used_channels;
- u8 irq;
+ int irq;
};
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2597b83a8ae2..95aa5233726c 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -95,6 +95,7 @@ struct ch341_private {
unsigned baud_rate; /* set baud rate */
u8 line_control; /* set line control value RTS/DTR */
u8 line_status; /* active status of modem control inputs */
+ u8 lcr;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
+ if (r < 0)
+ dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
@@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- return r;
+ if (r < bufsize) {
+ if (r >= 0) {
+ dev_err(&dev->dev,
+ "short control message received (%d < %u)\n",
+ r, bufsize);
+ r = -EIO;
+ }
+
+ dev_err(&dev->dev, "failed to receive control message: %d\n",
+ r);
+ return r;
+ }
+
+ return 0;
}
-static int ch341_init_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv, unsigned ctrl)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+ struct ch341_private *priv, u8 lcr)
{
short a;
int r;
@@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev,
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- /* 0x9c is "enable SFR_UART Control register and timer" */
- r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
- 0x9c | (ctrl << 8), a | 0x80);
+ /*
+ * CH341A buffers data until a full endpoint-size packet (32 bytes)
+ * has been received unless bit 7 is set.
+ */
+ a |= BIT(7);
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ if (r)
+ return r;
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+ if (r)
+ return r;
return r;
}
@@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
unsigned long flags;
buffer = kmalloc(size, GFP_KERNEL);
@@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- /* setup the private status if available */
- if (r == 2) {
- r = 0;
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- spin_unlock_irqrestore(&priv->lock, flags);
- } else
- r = -EPROTO;
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+ spin_unlock_irqrestore(&priv->lock, flags);
out: kfree(buffer);
return r;
@@ -200,9 +221,9 @@ out: kfree(buffer);
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
@@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- r = ch341_init_set_baudrate(dev, priv, 0);
+ r = ch341_set_baudrate_lcr(dev, priv, priv->lcr);
if (r < 0)
goto out;
@@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(port->serial->dev, priv);
if (r < 0)
@@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
r = ch341_configure(serial->dev, priv);
if (r)
- goto out;
+ return r;
if (tty)
ch341_set_termios(tty, port, NULL);
@@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
- goto out;
+ return r;
}
r = usb_serial_generic_open(tty, port);
+ if (r)
+ goto err_kill_interrupt_urb;
+
+ return 0;
+
+err_kill_interrupt_urb:
+ usb_kill_urb(port->interrupt_in_urb);
-out: return r;
+ return r;
}
/* Old_termios contains the original termios settings and
@@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty,
baud_rate = tty_get_baud_rate(tty);
- priv->baud_rate = baud_rate;
ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
switch (C_CSIZE(tty)) {
@@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty,
ctrl |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
- r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+ priv->baud_rate = baud_rate;
+
+ r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
if (r < 0 && old_termios) {
priv->baud_rate = tty_termios_baud_rate(old_termios);
tty_termios_copy_hw(&tty->termios, old_termios);
+ } else if (r == 0) {
+ priv->lcr = ctrl;
}
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
}
- ch341_set_handshake(port->serial->dev, priv->line_control);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (C_BAUD(tty) == B0)
+ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
- struct ch341_private *priv;
-
- priv = usb_get_serial_port_data(serial->port[0]);
+ struct usb_serial_port *port = serial->port[0];
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ int ret;
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
- return 0;
+ if (tty_port_initialized(&port->port)) {
+ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver ch341_device = {
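The ch341_control_in() change above treats a short control-IN transfer as an error instead of passing the byte count back to the caller. A generic sketch of that check; control_in_exact() and its request parameters are hypothetical:

#include <linux/usb.h>

static int control_in_exact(struct usb_device *udev, u8 req, u16 val, u16 idx,
			    void *buf, u16 len)
{
	int r;

	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			    val, idx, buf, len, 1000);
	if (r < 0)
		return r;
	if (r != len)
		return -EIO;	/* short read */

	return 0;
}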
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 5f17a3b9916d..80260b08398b 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -50,6 +50,7 @@
#define CYBERJACK_PRODUCT_ID 0x0100
/* Function prototypes */
+static int cyberjack_attach(struct usb_serial *serial);
static int cyberjack_port_probe(struct usb_serial_port *port);
static int cyberjack_port_remove(struct usb_serial_port *port);
static int cyberjack_open(struct tty_struct *tty,
@@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = {
.description = "Reiner SCT Cyberjack USB card reader",
.id_table = id_table,
.num_ports = 1,
+ .attach = cyberjack_attach,
.port_probe = cyberjack_port_probe,
.port_remove = cyberjack_port_remove,
.open = cyberjack_open,
@@ -100,6 +102,14 @@ struct cyberjack_private {
short wrsent; /* Data already sent */
};
+static int cyberjack_attach(struct usb_serial *serial)
+{
+ if (serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
static int cyberjack_port_probe(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 8282a6a18fee..22f23a429a95 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1237,6 +1237,7 @@ static int f81534_attach(struct usb_serial *serial)
static int f81534_port_probe(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv;
+ int ret;
port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
@@ -1246,10 +1247,11 @@ static int f81534_port_probe(struct usb_serial_port *port)
mutex_init(&port_priv->mcr_mutex);
/* Assign logic-to-phy mapping */
- port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
- if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
- return -ENODEV;
+ ret = f81534_logic_to_phy_port(port->serial, port);
+ if (ret < 0)
+ return ret;
+ port_priv->phy_num = ret;
usb_set_serial_port_data(port, port_priv);
dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
port->port_number, port_priv->phy_num);
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 97cabf803c2f..b2f2e87aed94 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1043,6 +1043,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ kfree(buffer);
}
/* we are done with this urb, so let the host driver
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dcc0c58aaad5..d50e5773483f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2751,6 +2751,11 @@ static int edge_startup(struct usb_serial *serial)
EDGE_COMPATIBILITY_MASK1,
EDGE_COMPATIBILITY_MASK2 };
+ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev = serial->dev;
/* create our private serial structure */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c339163698eb..9a0db2965fbb 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1499,8 +1499,7 @@ static int do_boot_mode(struct edgeport_serial *serial,
dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
- /* return an error on purpose */
- return -ENODEV;
+ return 1;
}
stayinbootmode:
@@ -1508,7 +1507,7 @@ stayinbootmode:
dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
- return 0;
+ return 1;
}
static int ti_do_config(struct edgeport_port *port, int feature, int on)
@@ -2546,6 +2545,13 @@ static int edge_startup(struct usb_serial *serial)
int status;
u16 product_id;
+ /* Make sure we have the required endpoints when in download mode. */
+ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+ }
+
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (!edge_serial)
@@ -2553,14 +2559,18 @@ static int edge_startup(struct usb_serial *serial)
mutex_init(&edge_serial->es_lock);
edge_serial->serial = serial;
+ INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
usb_set_serial_data(serial, edge_serial);
status = download_fw(edge_serial);
- if (status) {
+ if (status < 0) {
kfree(edge_serial);
return status;
}
+ if (status > 0)
+ return 1; /* bind but do not register any ports */
+
product_id = le16_to_cpu(
edge_serial->serial->dev->descriptor.idProduct);
@@ -2572,7 +2582,6 @@ static int edge_startup(struct usb_serial *serial)
}
}
- INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
edge_heartbeat_schedule(edge_serial);
return 0;
@@ -2580,6 +2589,9 @@ static int edge_startup(struct usb_serial *serial)
static void edge_disconnect(struct usb_serial *serial)
{
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+ cancel_delayed_work_sync(&edge_serial->heartbeat_work);
}
static void edge_release(struct usb_serial *serial)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 344b4eea4bd5..d57fb5199218 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -68,6 +68,16 @@ struct iuu_private {
u32 clk;
};
+static int iuu_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
static int iuu_port_probe(struct usb_serial_port *port)
{
struct iuu_private *priv;
@@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = {
.tiocmset = iuu_tiocmset,
.set_termios = iuu_set_termios,
.init_termios = iuu_init_termios,
+ .attach = iuu_attach,
.port_probe = iuu_port_probe,
.port_remove = iuu_port_remove,
};
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e49ad0c63ad8..83523fcf6fb9 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
#endif
+static int keyspan_pda_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int keyspan_pda_port_probe(struct usb_serial_port *port)
{
@@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = {
.break_ctl = keyspan_pda_break_ctl,
.tiocmget = keyspan_pda_tiocmget,
.tiocmset = keyspan_pda_tiocmset,
+ .attach = keyspan_pda_attach,
.port_probe = keyspan_pda_port_probe,
.port_remove = keyspan_pda_port_remove,
};
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 0ee190fc1bf8..6cb45757818f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
status_buf, KLSI_STATUSBUF_LEN,
10000
);
- if (rc < 0)
- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
- rc);
- else {
+ if (rc != KLSI_STATUSBUF_LEN) {
+ dev_err(&port->dev, "reading line status failed: %d\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ } else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x\n",
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 2363654cafc9..813035f51fe7 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -51,6 +51,7 @@
/* Function prototypes */
+static int kobil_attach(struct usb_serial *serial);
static int kobil_port_probe(struct usb_serial_port *probe);
static int kobil_port_remove(struct usb_serial_port *probe);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
@@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = {
.description = "KOBIL USB smart card terminal",
.id_table = id_table,
.num_ports = 1,
+ .attach = kobil_attach,
.port_probe = kobil_port_probe,
.port_remove = kobil_port_remove,
.ioctl = kobil_ioctl,
@@ -113,6 +115,16 @@ struct kobil_private {
};
+static int kobil_attach(struct usb_serial *serial)
+{
+ if (serial->num_interrupt_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int kobil_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index d52caa03679c..91bc170b408a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -65,8 +65,6 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-static struct usb_serial_driver moschip7720_2port_driver;
-
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
@@ -970,25 +968,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
tty_port_tty_wakeup(&mos7720_port->port->port);
}
-/*
- * mos77xx_probe
- * this function installs the appropriate read interrupt endpoint callback
- * depending on whether the device is a 7720 or 7715, thus avoiding costly
- * run-time checks in the high-frequency callback routine itself.
- */
-static int mos77xx_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
- if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
- moschip7720_2port_driver.read_int_callback =
- mos7715_interrupt_callback;
- else
- moschip7720_2port_driver.read_int_callback =
- mos7720_interrupt_callback;
-
- return 0;
-}
-
static int mos77xx_calc_num_ports(struct usb_serial *serial)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
@@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial)
u16 product;
int ret_val;
+ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk endpoints\n");
+ return -ENODEV;
+ }
+
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
@@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial)
tmp->interrupt_in_endpointAddress;
serial->port[1]->interrupt_in_urb = NULL;
serial->port[1]->interrupt_in_buffer = NULL;
+
+ if (serial->port[0]->interrupt_in_urb) {
+ struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+ urb->complete = mos7715_interrupt_callback;
+ }
}
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
- /* start the interrupt urb */
- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
- if (ret_val)
- dev_err(&dev->dev,
- "%s - Error %d submitting control urb\n",
- __func__, ret_val);
-
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
if (product == MOSCHIP_DEVICE_ID_7715) {
ret_val = mos7715_parport_init(serial);
@@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial)
return ret_val;
}
#endif
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+ if (ret_val) {
+ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
+ ret_val);
+ }
+
/* LSR For Port 1 */
read_mos_reg(serial, 0, MOS7720_LSR, &data);
dev_dbg(&dev->dev, "LSR:%x\n", data);
@@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial)
static void mos7720_release(struct usb_serial *serial)
{
+ usb_kill_urb(serial->port[0]->interrupt_in_urb);
+
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* close the parallel port */
@@ -2019,11 +2011,6 @@ static int mos7720_port_probe(struct usb_serial_port *port)
if (!mos7720_port)
return -ENOMEM;
- /* Initialize all port interrupt end point to port 0 int endpoint.
- * Our device has only one interrupt endpoint common to all ports.
- */
- port->interrupt_in_endpointAddress =
- port->serial->port[0]->interrupt_in_endpointAddress;
mos7720_port->port = port;
usb_set_serial_port_data(port, mos7720_port);
@@ -2053,7 +2040,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
- .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.port_probe = mos7720_port_probe,
@@ -2067,7 +2053,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = NULL /* dynamically assigned in probe() */
+ .read_int_callback = mos7720_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
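For mos7720 the old mos77xx_probe() rewrote read_int_callback in the shared usb_serial_driver structure, which is global state and misbehaves when a 7720 and a 7715 are bound at the same time. The hunks above keep mos7720_interrupt_callback as the driver default and repoint only the device's own interrupt URB for 7715 hardware. A minimal sketch of that per-device fix-up, assuming the callbacks and the MOSCHIP_DEVICE_ID_7715 definition from mos7720.c above; the helper name is hypothetical.

/* Sketch only: pick the completion handler per device, not per driver. */
static void mos77xx_select_int_callback(struct usb_serial *serial, u16 product)
{
        struct urb *urb = serial->port[0]->interrupt_in_urb;

        if (product == MOSCHIP_DEVICE_ID_7715 && urb)
                urb->complete = mos7715_interrupt_callback;
        /* otherwise the default mos7720_interrupt_callback stays in place */
}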
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 9a220b8e810f..ea27fb23967a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -214,7 +214,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
struct moschip_port {
int port_num; /*Actual port number in the device(1,2,etc) */
- struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
@@ -1037,9 +1036,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
serial,
serial->port[0]->interrupt_in_urb->interval);
- /* start interrupt read for mos7840 *
- * will continue as long as mos7840 is connected */
-
+ /* start interrupt read for mos7840 */
response =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
@@ -1186,7 +1183,6 @@ static void mos7840_close(struct usb_serial_port *port)
}
}
- usb_kill_urb(mos7840_port->write_urb);
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
@@ -1199,12 +1195,6 @@ static void mos7840_close(struct usb_serial_port *port)
}
}
- if (mos7840_port->write_urb) {
- /* if this urb had a transfer buffer already (old tx) free it */
- kfree(mos7840_port->write_urb->transfer_buffer);
- usb_free_urb(mos7840_port->write_urb);
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
@@ -2113,6 +2103,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
return mos7840_num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
@@ -2388,6 +2389,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
.tiocmset = mos7840_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
+ .attach = mos7840_attach,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index f6c6900bccf0..a180b17d2432 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -38,6 +38,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
+static int omninet_attach(struct usb_serial *serial);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
@@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
+ .attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.open = omninet_open,
@@ -104,6 +106,17 @@ struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
+static int omninet_attach(struct usb_serial *serial)
+{
+ /* The second bulk-out endpoint is used for writing. */
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a4b88bc038b6..b8bf52bf7a94 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
+static int oti6858_attach(struct usb_serial *serial);
static int oti6858_port_probe(struct usb_serial_port *port);
static int oti6858_port_remove(struct usb_serial_port *port);
@@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = {
.write_bulk_callback = oti6858_write_bulk_callback,
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
+ .attach = oti6858_attach,
.port_probe = oti6858_port_probe,
.port_remove = oti6858_port_remove,
};
@@ -324,6 +326,20 @@ static void send_data(struct work_struct *work)
usb_serial_port_softint(port);
}
+static int oti6858_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int oti6858_port_probe(struct usb_serial_port *port)
{
struct oti6858_private *priv;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ae682e4eeaef..46fca6b75846 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -220,9 +220,17 @@ static int pl2303_probe(struct usb_serial *serial,
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
+ unsigned char num_ports = serial->num_ports;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 659cb8606bd9..5709cc93b083 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct qt2_port_private *port_priv;
- unsigned long flags;
int i;
serial = port->serial;
port_priv = usb_get_serial_port_data(port);
- spin_lock_irqsave(&port_priv->urb_lock, flags);
usb_kill_urb(port_priv->write_urb);
- port_priv->urb_in_use = false;
- spin_unlock_irqrestore(&port_priv->urb_lock, flags);
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index ef0dbf0703c5..475e6c31b266 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
return 0;
}
+static int spcp8x5_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int spcp8x5_port_probe(struct usb_serial_port *port)
{
const struct usb_device_id *id = usb_get_serial_data(port->serial);
@@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = {
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
.probe = spcp8x5_probe,
+ .attach = spcp8x5_attach,
.port_probe = spcp8x5_port_probe,
.port_remove = spcp8x5_port_remove,
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 8db9d071d940..64b85b8dedf3 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -579,6 +579,13 @@ static int ti_startup(struct usb_serial *serial)
goto free_tdev;
}
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ status = -ENODEV;
+ goto free_tdev;
+ }
+
return 0;
free_tdev:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index af3c7eecff91..16cc18369111 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2109,6 +2109,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported-by George Cherian <george.cherian@cavium.com> */
+UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+ "JMicron",
+ "JMS56x",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/*
* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79451f7ef1b7..062c205f0046 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
- const u8 bzero[16] = { 0 };
u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
size_t zero_padding;
@@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
- sg_set_buf(&sg[3], bzero, zero_padding);
+ sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
skcipher_request_set_tfm(req, tfm_cbc);
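The wusbcore change above removes a scatterlist entry that pointed at an on-stack zero buffer, which no longer works once the kernel stack is virtually mapped, and points the padding entry at the shared zero page instead. A minimal sketch of that substitution, outside the context of this patch; the helper name is hypothetical.

#include <linux/scatterlist.h>
#include <linux/mm.h>           /* ZERO_PAGE() */

/* Sketch only: describe 'pad' bytes of zeroes without a stack buffer. */
static void sg_set_zero_padding(struct scatterlist *sg, unsigned int pad)
{
        sg_set_page(sg, ZERO_PAGE(0), pad, 0);
}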
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index be1ee89ee917..36d75c367d22 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -27,6 +27,45 @@ static LIST_HEAD(parent_list);
static DEFINE_MUTEX(parent_list_lock);
static struct class_compat *mdev_bus_compat_class;
+static LIST_HEAD(mdev_list);
+static DEFINE_MUTEX(mdev_list_lock);
+
+struct device *mdev_parent_dev(struct mdev_device *mdev)
+{
+ return mdev->parent->dev;
+}
+EXPORT_SYMBOL(mdev_parent_dev);
+
+void *mdev_get_drvdata(struct mdev_device *mdev)
+{
+ return mdev->driver_data;
+}
+EXPORT_SYMBOL(mdev_get_drvdata);
+
+void mdev_set_drvdata(struct mdev_device *mdev, void *data)
+{
+ mdev->driver_data = data;
+}
+EXPORT_SYMBOL(mdev_set_drvdata);
+
+struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
+EXPORT_SYMBOL(mdev_dev);
+
+struct mdev_device *mdev_from_dev(struct device *dev)
+{
+ return dev_is_mdev(dev) ? to_mdev_device(dev) : NULL;
+}
+EXPORT_SYMBOL(mdev_from_dev);
+
+uuid_le mdev_uuid(struct mdev_device *mdev)
+{
+ return mdev->uuid;
+}
+EXPORT_SYMBOL(mdev_uuid);
+
static int _find_mdev_device(struct device *dev, void *data)
{
struct mdev_device *mdev;
@@ -42,7 +81,7 @@ static int _find_mdev_device(struct device *dev, void *data)
return 0;
}
-static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
+static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid)
{
struct device *dev;
@@ -56,9 +95,9 @@ static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
}
/* Should be called holding parent_list_lock */
-static struct parent_device *__find_parent_device(struct device *dev)
+static struct mdev_parent *__find_parent_device(struct device *dev)
{
- struct parent_device *parent;
+ struct mdev_parent *parent;
list_for_each_entry(parent, &parent_list, next) {
if (parent->dev == dev)
@@ -69,8 +108,8 @@ static struct parent_device *__find_parent_device(struct device *dev)
static void mdev_release_parent(struct kref *kref)
{
- struct parent_device *parent = container_of(kref, struct parent_device,
- ref);
+ struct mdev_parent *parent = container_of(kref, struct mdev_parent,
+ ref);
struct device *dev = parent->dev;
kfree(parent);
@@ -78,7 +117,7 @@ static void mdev_release_parent(struct kref *kref)
}
static
-inline struct parent_device *mdev_get_parent(struct parent_device *parent)
+inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -86,7 +125,7 @@ inline struct parent_device *mdev_get_parent(struct parent_device *parent)
return parent;
}
-static inline void mdev_put_parent(struct parent_device *parent)
+static inline void mdev_put_parent(struct mdev_parent *parent)
{
if (parent)
kref_put(&parent->ref, mdev_release_parent);
@@ -95,7 +134,7 @@ static inline void mdev_put_parent(struct parent_device *parent)
static int mdev_device_create_ops(struct kobject *kobj,
struct mdev_device *mdev)
{
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
ret = parent->ops->create(kobj, mdev);
@@ -122,7 +161,7 @@ static int mdev_device_create_ops(struct kobject *kobj,
*/
static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
{
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
/*
@@ -153,10 +192,10 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
* Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
-int mdev_register_device(struct device *dev, const struct parent_ops *ops)
+int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
{
int ret;
- struct parent_device *parent;
+ struct mdev_parent *parent;
/* check for mandatory ops */
if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
@@ -229,7 +268,7 @@ EXPORT_SYMBOL(mdev_register_device);
void mdev_unregister_device(struct device *dev)
{
- struct parent_device *parent;
+ struct mdev_parent *parent;
bool force_remove = true;
mutex_lock(&parent_list_lock);
@@ -266,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
{
int ret;
struct mdev_device *mdev;
- struct parent_device *parent;
+ struct mdev_parent *parent;
struct mdev_type *type = to_mdev_type(kobj);
parent = mdev_get_parent(type->parent);
@@ -316,6 +355,11 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
dev_dbg(&mdev->dev, "MDEV: created\n");
mutex_unlock(&parent->lock);
+
+ mutex_lock(&mdev_list_lock);
+ list_add(&mdev->next, &mdev_list);
+ mutex_unlock(&mdev_list_lock);
+
return ret;
create_failed:
@@ -329,12 +373,30 @@ create_err:
int mdev_device_remove(struct device *dev, bool force_remove)
{
- struct mdev_device *mdev;
- struct parent_device *parent;
+ struct mdev_device *mdev, *tmp;
+ struct mdev_parent *parent;
struct mdev_type *type;
int ret;
+ bool found = false;
mdev = to_mdev_device(dev);
+
+ mutex_lock(&mdev_list_lock);
+ list_for_each_entry(tmp, &mdev_list, next) {
+ if (tmp == mdev) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ list_del(&mdev->next);
+
+ mutex_unlock(&mdev_list_lock);
+
+ if (!found)
+ return -ENODEV;
+
type = to_mdev_type(mdev->type_kobj);
parent = mdev->parent;
mutex_lock(&parent->lock);
@@ -342,6 +404,11 @@ int mdev_device_remove(struct device *dev, bool force_remove)
ret = mdev_device_remove_ops(mdev, force_remove);
if (ret) {
mutex_unlock(&parent->lock);
+
+ mutex_lock(&mdev_list_lock);
+ list_add(&mdev->next, &mdev_list);
+ mutex_unlock(&mdev_list_lock);
+
return ret;
}
@@ -349,7 +416,8 @@ int mdev_device_remove(struct device *dev, bool force_remove)
device_unregister(dev);
mutex_unlock(&parent->lock);
mdev_put_parent(parent);
- return ret;
+
+ return 0;
}
static int __init mdev_init(void)
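The mdev_core hunks introduce a module-private mdev_list so that mdev_device_remove() only acts on devices this module actually created, re-adding the entry if the vendor driver's remove callback fails. A minimal sketch of the validate-and-take step, assuming the mdev_list, mdev_list_lock and the struct mdev_device 'next' member added above; the helper name is hypothetical.

/* Sketch only: detach 'mdev' from the tracked list, or report -ENODEV. */
static int mdev_list_take(struct mdev_device *mdev)
{
        struct mdev_device *tmp;

        mutex_lock(&mdev_list_lock);
        list_for_each_entry(tmp, &mdev_list, next) {
                if (tmp == mdev) {
                        list_del(&mdev->next);
                        mutex_unlock(&mdev_list_lock);
                        return 0;
                }
        }
        mutex_unlock(&mdev_list_lock);

        return -ENODEV;         /* not a device we created */
}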
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index d35097cbf3d7..a9cefd70a705 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -16,10 +16,33 @@
int mdev_bus_register(void);
void mdev_bus_unregister(void);
+struct mdev_parent {
+ struct device *dev;
+ const struct mdev_parent_ops *ops;
+ struct kref ref;
+ struct mutex lock;
+ struct list_head next;
+ struct kset *mdev_types_kset;
+ struct list_head type_list;
+};
+
+struct mdev_device {
+ struct device dev;
+ struct mdev_parent *parent;
+ uuid_le uuid;
+ void *driver_data;
+ struct kref ref;
+ struct list_head next;
+ struct kobject *type_kobj;
+};
+
+#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
+#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
+
struct mdev_type {
struct kobject kobj;
struct kobject *devices_kobj;
- struct parent_device *parent;
+ struct mdev_parent *parent;
struct list_head next;
struct attribute_group *group;
};
@@ -29,8 +52,8 @@ struct mdev_type {
#define to_mdev_type(_kobj) \
container_of(_kobj, struct mdev_type, kobj)
-int parent_create_sysfs_files(struct parent_device *parent);
-void parent_remove_sysfs_files(struct parent_device *parent);
+int parent_create_sysfs_files(struct mdev_parent *parent);
+void parent_remove_sysfs_files(struct mdev_parent *parent);
int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 1a53deb2ee10..802df210929b 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -92,7 +92,7 @@ static struct kobj_type mdev_type_ktype = {
.release = mdev_type_release,
};
-struct mdev_type *add_mdev_supported_type(struct parent_device *parent,
+struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
struct attribute_group *group)
{
struct mdev_type *type;
@@ -158,7 +158,7 @@ static void remove_mdev_supported_type(struct mdev_type *type)
kobject_put(&type->kobj);
}
-static int add_mdev_supported_type_groups(struct parent_device *parent)
+static int add_mdev_supported_type_groups(struct mdev_parent *parent)
{
int i;
@@ -183,7 +183,7 @@ static int add_mdev_supported_type_groups(struct parent_device *parent)
}
/* mdev sysfs functions */
-void parent_remove_sysfs_files(struct parent_device *parent)
+void parent_remove_sysfs_files(struct mdev_parent *parent)
{
struct mdev_type *type, *tmp;
@@ -196,7 +196,7 @@ void parent_remove_sysfs_files(struct parent_device *parent)
kset_unregister(parent->mdev_types_kset);
}
-int parent_create_sysfs_files(struct parent_device *parent)
+int parent_create_sysfs_files(struct mdev_parent *parent)
{
int ret;
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index ffc36758cb84..fa848a701b8b 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -27,7 +27,7 @@
static int vfio_mdev_open(void *device_data)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
if (unlikely(!parent->ops->open))
@@ -46,7 +46,7 @@ static int vfio_mdev_open(void *device_data)
static void vfio_mdev_release(void *device_data)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (likely(parent->ops->release))
parent->ops->release(mdev);
@@ -58,7 +58,7 @@ static long vfio_mdev_unlocked_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->ioctl))
return -EINVAL;
@@ -70,7 +70,7 @@ static ssize_t vfio_mdev_read(void *device_data, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->read))
return -EINVAL;
@@ -82,7 +82,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->write))
return -EINVAL;
@@ -93,7 +93,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->mmap))
return -EINVAL;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index dcd7c2a99618..324c52e3a1a4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1142,6 +1142,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
}
vma->vm_private_data = vdev;
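The vfio-pci hunk adds the error handling that was missing after pci_iomap(): on failure the just-claimed BAR regions are released again instead of being leaked. A minimal sketch of that claim/map/unwind ordering; the helper name is hypothetical.

#include <linux/pci.h>

/* Sketch only: map one BAR and release the claimed region on failure. */
static void __iomem *vfio_map_bar_or_release(struct pci_dev *pdev, int index)
{
        void __iomem *io = pci_iomap(pdev, index, 0);

        if (!io)
                pci_release_selected_regions(pdev, 1 << index);

        return io;
}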
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e4220ca8ca27..330a57024cbc 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -31,8 +31,6 @@
#include "vfio_pci_private.h"
-#define PCI_CFG_SPACE_SIZE 256
-
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 5ffd1d9ad4bd..357243d76f10 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -193,7 +193,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
if (!vdev->has_vga)
return -EINVAL;
- switch (pos) {
+ if (pos > 0xbfffful)
+ return -EINVAL;
+
+ switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378ddadc5c..c8823578a1b2 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
long ret = 0, locked, lock_limit;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
+ if (WARN_ON_ONCE(!mm))
+ return -EPERM;
if (!npages)
return 0;
- down_write(&current->mm->mmap_sem);
- locked = current->mm->locked_vm + npages;
+ down_write(&mm->mmap_sem);
+ locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- current->mm->locked_vm += npages;
+ mm->locked_vm += npages;
pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return ret;
}
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
- if (!current || !current->mm || !npages)
- return; /* process exited */
+ if (!mm || !npages)
+ return;
- down_write(&current->mm->mmap_sem);
- if (WARN_ON_ONCE(npages > current->mm->locked_vm))
- npages = current->mm->locked_vm;
- current->mm->locked_vm -= npages;
+ down_write(&mm->mmap_sem);
+ if (WARN_ON_ONCE(npages > mm->locked_vm))
+ npages = mm->locked_vm;
+ mm->locked_vm -= npages;
pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
}
/*
@@ -89,6 +89,15 @@ struct tce_iommu_group {
};
/*
+ * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+ struct list_head next;
+ struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
* at the moment of initialization.
@@ -97,24 +106,68 @@ struct tce_container {
struct mutex lock;
bool enabled;
bool v2;
+ bool def_window_pending;
unsigned long locked_pages;
+ struct mm_struct *mm;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct list_head group_list;
+ struct list_head prereg_list;
};
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+ if (container->mm) {
+ if (container->mm == current->mm)
+ return 0;
+ return -EPERM;
+ }
+ BUG_ON(!current->mm);
+ container->mm = current->mm;
+ atomic_inc(&container->mm->mm_count);
+
+ return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+ struct tce_iommu_prereg *tcemem)
+{
+ long ret;
+
+ ret = mm_iommu_put(container->mm, tcemem->mem);
+ if (ret)
+ return ret;
+
+ list_del(&tcemem->next);
+ kfree(tcemem);
+
+ return 0;
+}
+
static long tce_iommu_unregister_pages(struct tce_container *container,
__u64 vaddr, __u64 size)
{
struct mm_iommu_table_group_mem_t *mem;
+ struct tce_iommu_prereg *tcemem;
+ bool found = false;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
- return mm_iommu_put(mem);
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem = NULL;
+ struct tce_iommu_prereg *tcemem;
unsigned long entries = size >> PAGE_SHIFT;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
((vaddr + size) < vaddr))
return -EINVAL;
- ret = mm_iommu_get(vaddr, entries, &mem);
+ mem = mm_iommu_find(container->mm, vaddr, entries);
+ if (mem) {
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem)
+ return -EBUSY;
+ }
+ }
+
+ ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ tcemem->mem = mem;
+ list_add(&tcemem->next, &container->prereg_list);
+
container->enabled = true;
return 0;
}
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
BUG_ON(tbl->it_userspace);
- ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
if (ret)
return ret;
uas = vzalloc(cb);
if (!uas) {
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
return -ENOMEM;
}
tbl->it_userspace = uas;
@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
return 0;
}
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
vfree(tbl->it_userspace);
tbl->it_userspace = NULL;
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp;
- if (!current->mm)
- return -ESRCH; /* process exited */
-
if (container->enabled)
return -EBUSY;
@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
if (!table_group->tce32_size)
return -EPERM;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
locked = table_group->tce32_size >> PAGE_SHIFT;
- ret = try_increment_locked_vm(locked);
+ ret = try_increment_locked_vm(container->mm, locked);
if (ret)
return ret;
@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
container->enabled = false;
- if (!current->mm)
- return;
-
- decrement_locked_vm(container->locked_pages);
+ BUG_ON(!container->mm);
+ decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
mutex_init(&container->lock);
INIT_LIST_HEAD_RCU(&container->group_list);
+ INIT_LIST_HEAD_RCU(&container->prereg_list);
container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
+ }
+
+ while (!list_empty(&container->prereg_list)) {
+ struct tce_iommu_prereg *tcemem;
+
+ tcemem = list_first_entry(&container->prereg_list,
+ struct tce_iommu_prereg, next);
+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
}
tce_iommu_disable(container);
+ if (container->mm)
+ mmdrop(container->mm);
mutex_destroy(&container->lock);
kfree(container);
@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
put_page(page);
}
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+ unsigned long tce, unsigned long size,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, size);
if (!mem)
return -EINVAL;
@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
return 0;
}
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
- unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+ struct iommu_table *tbl, unsigned long entry)
{
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
- if (!pua || !current || !current->mm)
+ if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
&hpa, &mem);
if (ret)
pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
if (container->v2) {
- tce_iommu_unuse_page_v2(tbl, entry);
+ tce_iommu_unuse_page_v2(container, tbl, entry);
continue;
}
@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
entry + i);
- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container,
+ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
if (ret)
break;
@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
if (ret) {
/* dirtmp cannot be DMA_NONE here */
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
*pua = tce;
@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
if (!table_size)
return -EINVAL;
- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
if (ret)
return ret;
@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl)
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
tbl->it_ops->free(tbl);
- decrement_locked_vm(pages);
+ decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@ unset_exit:
table_group = iommu_group_get_iommudata(tcegrp->grp);
table_group->ops->unset_window(table_group, num);
}
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
return ret;
}
@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
/* Free table */
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
return 0;
}
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+ long ret;
+ __u64 start_addr = 0;
+ struct tce_iommu_group *tcegrp;
+ struct iommu_table_group *table_group;
+
+ if (!container->def_window_pending)
+ return 0;
+
+ if (!tce_groups_attached(container))
+ return -ENODEV;
+
+ tcegrp = list_first_entry(&container->group_list,
+ struct tce_iommu_group, next);
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+ if (!table_group)
+ return -ENODEV;
+
+ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+ table_group->tce32_size, 1, &start_addr);
+ WARN_ON_ONCE(!ret && start_addr);
+
+ if (!ret)
+ container->def_window_pending = false;
+
+ return ret;
+}
+
static long tce_iommu_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
}
return (ret < 0) ? 0 : ret;
+ }
+
+ /*
+ * Sanity check to prevent one userspace from manipulating
+ * another userspace mm.
+ */
+ BUG_ON(!container);
+ if (container->mm && container->mm != current->mm)
+ return -EPERM;
+ switch (cmd) {
case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
struct vfio_iommu_spapr_tce_info info;
struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (copy_from_user(&param, (void __user *)arg, minsz))
return -EFAULT;
@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ if (!container->mm)
+ return -EPERM;
+
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
ret = tce_iommu_create_window(container, create.page_shift,
create.window_size, create.levels,
&create.start_addr);
@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
if (remove.flags)
return -EINVAL;
+ if (container->def_window_pending && !remove.start_addr) {
+ container->def_window_pending = false;
+ return 0;
+ }
+
mutex_lock(&container->lock);
ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
if (tbl->it_map)
iommu_release_ownership(tbl);
@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
- long i, ret = 0;
- struct iommu_table *tbl = NULL;
-
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
- /*
- * If it the first group attached, check if there is
- * a default DMA window and create one if none as
- * the userspace expects it to exist.
- */
- if (!tce_groups_attached(container) && !container->tables[0]) {
- ret = tce_iommu_create_table(container,
- table_group,
- 0, /* window number */
- IOMMU_PAGE_SHIFT_4K,
- table_group->tce32_size,
- 1, /* default levels */
- &tbl);
- if (ret)
- goto release_exit;
- else
- container->tables[0] = tbl;
- }
-
- /* Set all windows to the new group */
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- tbl = container->tables[i];
-
- if (!tbl)
- continue;
-
- /* Set the default window to a new group */
- ret = table_group->ops->set_window(table_group, i, tbl);
- if (ret)
- goto release_exit;
- }
-
return 0;
-
-release_exit:
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
-
- table_group->ops->release_ownership(table_group);
-
- return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
}
if (!table_group->ops || !table_group->ops->take_ownership ||
- !table_group->ops->release_ownership)
+ !table_group->ops->release_ownership) {
ret = tce_iommu_take_ownership(container, table_group);
- else
+ } else {
ret = tce_iommu_take_ownership_ddw(container, table_group);
+ if (!tce_groups_attached(container) && !container->tables[0])
+ container->def_window_pending = true;
+ }
if (!ret) {
tcegrp->grp = iommu_group;
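The spapr TCE rework above moves all locked_vm accounting from current->mm to an mm_struct captured on first use: tce_iommu_mm_set() remembers the caller's mm and takes a bare mm_count reference so later ioctls and the final release can still account pages after the original task has exited, with mmdrop() as the counterpart in tce_iommu_release(). A minimal sketch of that capture, assuming the struct tce_container 'mm' member added above; the helper name is hypothetical.

/* Sketch only: remember the caller's mm once, pinned via mm_count. */
static long container_mm_set(struct tce_container *container)
{
        if (container->mm)
                return container->mm == current->mm ? 0 : -EPERM;

        container->mm = current->mm;
        atomic_inc(&container->mm->mm_count);   /* paired with mmdrop() */

        return 0;
}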
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9815e45c23c4..b3cc33fa6d26 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,7 +36,6 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
@@ -268,28 +267,38 @@ static void vfio_lock_acct(struct task_struct *task, long npage)
{
struct vwork *vwork;
struct mm_struct *mm;
+ bool is_current;
if (!npage)
return;
- mm = get_task_mm(task);
+ is_current = (task->mm == current->mm);
+
+ mm = is_current ? task->mm : get_task_mm(task);
if (!mm)
- return; /* process exited or nothing to do */
+ return; /* process exited */
if (down_write_trylock(&mm->mmap_sem)) {
mm->locked_vm += npage;
up_write(&mm->mmap_sem);
- mmput(mm);
+ if (!is_current)
+ mmput(mm);
return;
}
+ if (is_current) {
+ mm = get_task_mm(task);
+ if (!mm)
+ return;
+ }
+
/*
* Couldn't get mmap_sem lock, so must setup to update
* mm->locked_vm later. If locked_vm were atomic, we
* wouldn't need this silliness
*/
vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
- if (!vwork) {
+ if (WARN_ON(!vwork)) {
mmput(mm);
return;
}
@@ -362,7 +371,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
- NULL);
+ NULL, NULL);
up_read(&mm->mmap_sem);
}
@@ -393,77 +402,71 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base)
{
- unsigned long limit;
- bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
- CAP_IPC_LOCK);
- struct mm_struct *mm;
- long ret, i = 0, lock_acct = 0;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
+ long ret, pinned = 0, lock_acct = 0;
bool rsvd;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
- mm = get_task_mm(dma->task);
- if (!mm)
+ /* This code path is only user initiated */
+ if (!current->mm)
return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+ ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
if (ret)
- goto pin_pg_remote_exit;
+ return ret;
+ pinned++;
rsvd = is_invalid_reserved_pfn(*pfn_base);
- limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
/*
* Reserved pages aren't counted against the user, externally pinned
* pages are already counted against the user.
*/
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap && mm->locked_vm + 1 > limit) {
+ if (!lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
limit << PAGE_SHIFT);
- ret = -ENOMEM;
- goto pin_pg_remote_exit;
+ return -ENOMEM;
}
lock_acct++;
}
- i++;
- if (likely(!disable_hugepages)) {
- /* Lock all the consecutive pages from pfn_base */
- for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
- i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- unsigned long pfn = 0;
+ if (unlikely(disable_hugepages))
+ goto out;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
- if (ret)
- break;
+ /* Lock all the consecutive pages from pfn_base */
+ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+ pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+ unsigned long pfn = 0;
- if (pfn != *pfn_base + i ||
- rsvd != is_invalid_reserved_pfn(pfn)) {
+ ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+ if (ret)
+ break;
+
+ if (pfn != *pfn_base + pinned ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
+ put_pfn(pfn, dma->prot);
+ break;
+ }
+
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap &&
+ current->mm->locked_vm + lock_acct + 1 > limit) {
put_pfn(pfn, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, limit << PAGE_SHIFT);
break;
}
-
- if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap &&
- mm->locked_vm + lock_acct + 1 > limit) {
- put_pfn(pfn, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
- "exceeded\n", __func__,
- limit << PAGE_SHIFT);
- break;
- }
- lock_acct++;
- }
+ lock_acct++;
}
}
- vfio_lock_acct(dma->task, lock_acct);
- ret = i;
+out:
+ vfio_lock_acct(current, lock_acct);
-pin_pg_remote_exit:
- mmput(mm);
- return ret;
+ return pinned;
}
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
@@ -473,10 +476,10 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
long unlocked = 0, locked = 0;
long i;
- for (i = 0; i < npage; i++) {
+ for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
if (put_pfn(pfn++, dma->prot)) {
unlocked++;
- if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+ if (vfio_find_vpfn(dma, iova))
locked++;
}
}
@@ -491,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
unsigned long limit;
- bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
- CAP_IPC_LOCK);
+ bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
struct mm_struct *mm;
int ret;
bool rsvd;
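In the type1 rework the user-initiated pin path now works on current->mm directly, and the RLIMIT_MEMLOCK check is made against the calling task rather than a stored task reference. A minimal sketch of that limit test, mirroring the fields used in the hunk; the helper name is hypothetical and the caller is assumed to have verified current->mm, as the patch does.

/* Sketch only: would pinning 'extra' more pages exceed the caller's limit? */
static bool would_exceed_memlock(long extra)
{
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        if (capable(CAP_IPC_LOCK))
                return false;

        return current->mm->locked_vm + extra > limit;
}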
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d053843d..253310cdaaca 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
- ret = copy_from_iter(req, req_size, &out_iter);
- if (unlikely(ret != req_size)) {
+ if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
vq_err(vq, "Faulted on copy_from_iter\n");
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
@@ -1749,7 +1748,6 @@ out:
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex);
@@ -1758,7 +1756,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
if (!tv_nexus) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 266354390c8f..d6432603880c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -49,7 +49,7 @@ enum {
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
rb, __u64, __subtree_last,
- START, LAST, , vhost_umem_interval_tree);
+ START, LAST, static inline, vhost_umem_interval_tree);
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
+ vq->last_used_event = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access);
-static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{
int ret;
@@ -749,7 +750,7 @@ out:
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
- void *from, unsigned size)
+ void __user *from, unsigned size)
{
int ret;
@@ -783,7 +784,7 @@ out:
}
static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
- void *addr, unsigned size)
+ void __user *addr, unsigned size)
{
int ret;
@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
return 0;
}
-int vhost_process_iotlb_msg(struct vhost_dev *dev,
- struct vhost_iotlb_msg *msg)
+static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
{
int ret = 0;
@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL;
break;
}
- vq->last_avail_idx = s.num;
+ vq->last_avail_idx = vq->last_used_event = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
@@ -1862,8 +1863,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
i, count);
return -EINVAL;
}
- if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
- sizeof(desc))) {
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
@@ -2159,10 +2159,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new;
__virtio16 event;
bool v;
- /* Flush out used index updates. This is paired
- * with the barrier that the Guest executes when enabling
- * interrupts. */
- smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2170,6 +2166,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
@@ -2184,11 +2184,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
+ /* We're sure if the following conditions are met, there's no
+ * need to notify guest:
+ * 1) cached used event is ahead of new
+ * 2) old to new updating does not cross cached used event. */
+ if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
+ !vring_need_event(vq->last_used_event, new, old))
+ return false;
+
+ /* Flush out used index updates. This is paired
+ * with the barrier that the Guest executes when enabling
+ * interrupts. */
+ smp_mb();
+
if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+ vq->last_used_event = vhost16_to_cpu(vq, event);
+
+ return vring_need_event(vq->last_used_event, new, old);
}
/* This actually signals the guest, using eventfd. */
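The vhost_notify() change caches the guest's used event index in last_used_event so the common case can be decided without the memory barrier and without re-reading the index from guest memory: a signal is only needed when the old-to-new used index update crosses the event index the guest advertised. A minimal sketch of that early-out test, using the vring_need_event() helper the hunk relies on; the function name and the ring_size parameter (vq->num in the patch) are hypothetical.

#include <linux/types.h>
#include <linux/virtio_ring.h>  /* vring_need_event() */

/*
 * Sketch only: true when the cached used-event value already guarantees
 * that the guest needs no signal for the old -> new used index update.
 */
static bool cached_event_skips_signal(u16 cached_event, u16 old, u16 new,
                                      u16 ring_size)
{
        return vring_need_event(cached_event, new + ring_size, new) &&
               !vring_need_event(cached_event, new, old);
}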
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 78f3c5fc02e4..a9cbbb148f46 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -107,6 +107,9 @@ struct vhost_virtqueue {
/* Last index we used. */
u16 last_used_idx;
+ /* Last used event we've seen */
+ u16 last_used_event;
+
/* Used flags */
u16 used_flags;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 3bb02c60a2f5..bb8971f2a634 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
*
* Since these may be in userspace, we use (inline) accessors.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
static inline int getu16_kern(const struct vringh *vrh,
u16 *val, const __virtio16 *p)
{
- *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0;
}
static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+ WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0;
}
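The vringh hunk swaps ACCESS_ONCE() for the READ_ONCE()/WRITE_ONCE() pair, which also work on non-scalar types and make the direction of the access explicit. A tiny illustrative sketch of the same pattern, with made-up names:

#include <linux/compiler.h>
#include <linux/types.h>

static u16 load_idx(const u16 *p)
{
	/* Single, non-torn read the compiler may not merge or repeat. */
	return READ_ONCE(*p);
}

static void store_idx(u16 *p, u16 val)
{
	/* Single, non-torn write the compiler may not split or elide. */
	WRITE_ONCE(*p, val);
}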
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6b70966c19d..bbbf588540ed 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- spin_lock_bh(&vhost_vsock_lock);
list_for_each_entry(vsock, &vhost_vsock_list, list) {
u32 other_cid = vsock->guest_cid;
@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
continue;
if (other_cid == guest_cid) {
- spin_unlock_bh(&vhost_vsock_lock);
return vsock;
}
}
- spin_unlock_bh(&vhost_vsock_lock);
return NULL;
}
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ vsock = __vhost_vsock_get(guest_cid);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return vsock;
+}
+
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -559,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
return -EINVAL;
/* Refuse if CID is already in use */
- other = vhost_vsock_get(guest_cid);
- if (other && other != vsock)
- return -EADDRINUSE;
-
spin_lock_bh(&vhost_vsock_lock);
+ other = __vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return -EADDRINUSE;
+ }
vsock->guest_cid = guest_cid;
spin_unlock_bh(&vhost_vsock_lock);
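The vsock change splits the CID lookup into __vhost_vsock_get(), which expects vhost_vsock_lock to be held, and a locking wrapper, so that vhost_vsock_set_cid() can check and update the CID within one critical section. A generic sketch of that locked/unlocked helper split, with illustrative names rather than the vhost ones:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(obj_lock);
static LIST_HEAD(obj_list);

struct obj {
	struct list_head list;
	u32 id;
};

/* Caller must hold obj_lock. */
static struct obj *__obj_get(u32 id)
{
	struct obj *o;

	list_for_each_entry(o, &obj_list, list)
		if (o->id == id)
			return o;
	return NULL;
}

static struct obj *obj_get(u32 id)
{
	struct obj *o;

	spin_lock_bh(&obj_lock);
	o = __obj_get(id);
	spin_unlock_bh(&obj_lock);
	return o;
}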
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 1e11614322fe..42d02a206059 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/gio_device.h>
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 17f21cedff9b..c0c6b88d3839 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -35,7 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/fb.h>
#include <linux/init.h>
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 2d3b691f3fc4..038ac6934fe9 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
info->screen_size = resource_size(res);
info->screen_base = devm_ioremap(&dev->dev, res->start,
info->screen_size);
+ if (!info->screen_base) {
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
+
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;
diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c
index 9d68dc9ee7bf..abe3e54d4506 100644
--- a/drivers/video/fbdev/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
@@ -22,7 +22,7 @@
#include <linux/fb.h>
#include <asm/machvec.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hd64461.h>
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 9476d196f510..16f16f5e1a4b 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -16,7 +16,7 @@
#include <linux/dio.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static struct fb_info fb_info = {
.fix = {
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 8778e01cebac..1c3c7ab26a95 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -33,7 +33,7 @@
#include <linux/platform_data/video-mx3fb.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define MX3FB_NAME "mx3_sdc_fb"
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 7487f76f6275..04ea330ccf5d 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/setup.h>
#include <asm/q40_master.h>
#include <linux/fb.h>
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index d0a4e2f79a57..d80bc8a3200f 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -31,7 +31,7 @@
#include <linux/console.h>
#include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/div64.h>
#ifdef CONFIG_PM
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 7df4228e25f0..accfef71e984 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -67,7 +67,7 @@
#include <linux/io.h>
#include <asm/grfioctl.h> /* for HP-UX compatibility */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sticore.h"
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 10951c82f6ed..d570e19a2864 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -35,7 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <video/w100fb.h>
#include "w100fb.h"
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 48bfea91dbca..d47a2fcef818 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -489,6 +489,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
};
+static void virtio_mmio_release_dev_empty(struct device *_d) {}
/* Platform device */
@@ -511,6 +512,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev;
+ vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev;
INIT_LIST_HEAD(&vm_dev->virtqueues);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d9a905827967..186cbab327b8 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -37,7 +37,7 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
synchronize_irq(vp_dev->pci_dev->irq);
for (i = 0; i < vp_dev->msix_vectors; ++i)
- synchronize_irq(vp_dev->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* the notify function used when creating a virt queue */
@@ -102,41 +102,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return vp_vring_interrupt(irq, opaque);
}
-static void vp_free_vectors(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i;
-
- if (vp_dev->intx_enabled) {
- free_irq(vp_dev->pci_dev->irq, vp_dev);
- vp_dev->intx_enabled = 0;
- }
-
- for (i = 0; i < vp_dev->msix_used_vectors; ++i)
- free_irq(vp_dev->msix_entries[i].vector, vp_dev);
-
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-
- if (vp_dev->msix_enabled) {
- /* Disable the vector used for configuration */
- vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-
- pci_disable_msix(vp_dev->pci_dev);
- vp_dev->msix_enabled = 0;
- }
-
- vp_dev->msix_vectors = 0;
- vp_dev->msix_used_vectors = 0;
- kfree(vp_dev->msix_names);
- vp_dev->msix_names = NULL;
- kfree(vp_dev->msix_entries);
- vp_dev->msix_entries = NULL;
- kfree(vp_dev->msix_affinity_masks);
- vp_dev->msix_affinity_masks = NULL;
-}
-
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
bool per_vq_vectors)
{
@@ -147,10 +112,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
vp_dev->msix_vectors = nvectors;
- vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
- GFP_KERNEL);
- if (!vp_dev->msix_entries)
- goto error;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL);
if (!vp_dev->msix_names)
@@ -165,12 +126,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
- for (i = 0; i < nvectors; ++i)
- vp_dev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(vp_dev->pci_dev,
- vp_dev->msix_entries, nvectors);
- if (err)
+ err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
+ PCI_IRQ_MSIX);
+ if (err < 0)
goto error;
vp_dev->msix_enabled = 1;
@@ -178,7 +136,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
@@ -197,7 +155,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
@@ -206,19 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
}
return 0;
error:
- vp_free_vectors(vdev);
- return err;
-}
-
-static int vp_request_intx(struct virtio_device *vdev)
-{
- int err;
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
- IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
- if (!err)
- vp_dev->intx_enabled = 1;
return err;
}
@@ -276,67 +221,88 @@ void vp_del_vqs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq, *n;
- struct virtio_pci_vq_info *info;
+ int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- info = vp_dev->vqs[vq->index];
- if (vp_dev->per_vq_vectors &&
- info->msix_vector != VIRTIO_MSI_NO_VECTOR)
- free_irq(vp_dev->msix_entries[info->msix_vector].vector,
- vq);
+ if (vp_dev->per_vq_vectors) {
+ int v = vp_dev->vqs[vq->index]->msix_vector;
+
+ if (v != VIRTIO_MSI_NO_VECTOR)
+ free_irq(pci_irq_vector(vp_dev->pci_dev, v),
+ vq);
+ }
vp_del_vq(vq);
}
vp_dev->per_vq_vectors = false;
- vp_free_vectors(vdev);
+ if (vp_dev->intx_enabled) {
+ free_irq(vp_dev->pci_dev->irq, vp_dev);
+ vp_dev->intx_enabled = 0;
+ }
+
+ for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+ free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
+ if (vp_dev->msix_enabled) {
+ /* Disable the vector used for configuration */
+ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
+
+ pci_free_irq_vectors(vp_dev->pci_dev);
+ vp_dev->msix_enabled = 0;
+ }
+
+ vp_dev->msix_vectors = 0;
+ vp_dev->msix_used_vectors = 0;
+ kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
+ kfree(vp_dev->msix_affinity_masks);
+ vp_dev->msix_affinity_masks = NULL;
kfree(vp_dev->vqs);
vp_dev->vqs = NULL;
}
-static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
- bool use_msix,
bool per_vq_vectors)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
int i, err, nvectors, allocated_vectors;
- vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
- if (!use_msix) {
- /* Old style: one normal interrupt for change and all vqs. */
- err = vp_request_intx(vdev);
- if (err)
- goto error_find;
+ if (per_vq_vectors) {
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++nvectors;
} else {
- if (per_vq_vectors) {
- /* Best option: one for change interrupt, one per vq. */
- nvectors = 1;
- for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
- ++nvectors;
- } else {
- /* Second best: one for change, shared for all vqs. */
- nvectors = 2;
- }
-
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
- if (err)
- goto error_find;
+ /* Second best: one for change, shared for all vqs. */
+ nvectors = 2;
}
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+ if (err)
+ goto error_find;
+
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
- } else if (!callbacks[i] || !vp_dev->msix_enabled)
+ }
+
+ if (!callbacks[i])
msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors)
msix_vec = allocated_vectors++;
@@ -356,14 +322,12 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
sizeof *vp_dev->msix_names,
"%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(vp_dev->msix_entries[msix_vec].vector,
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0,
vp_dev->msix_names[msix_vec],
vqs[i]);
- if (err) {
- vp_del_vq(vqs[i]);
+ if (err)
goto error_find;
- }
}
return 0;
@@ -372,6 +336,43 @@ error_find:
return err;
}
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i, err;
+
+ vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+ if (!vp_dev->vqs)
+ return -ENOMEM;
+
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
+ dev_name(&vdev->dev), vp_dev);
+ if (err)
+ goto out_del_vqs;
+
+ vp_dev->intx_enabled = 1;
+ vp_dev->per_vq_vectors = false;
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+ vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ VIRTIO_MSI_NO_VECTOR);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto out_del_vqs;
+ }
+ }
+
+ return 0;
+out_del_vqs:
+ vp_del_vqs(vdev);
+ return err;
+}
+
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
@@ -381,17 +382,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- true, false);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- false, false);
+ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}
const char *vp_bus_name(struct virtio_device *vdev)
@@ -419,7 +418,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (vp_dev->msix_enabled) {
mask = vp_dev->msix_affinity_masks[info->msix_vector];
- irq = vp_dev->msix_entries[info->msix_vector].vector;
+ irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 28263200ed42..b2f666250ae0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -85,7 +85,6 @@ struct virtio_pci_device {
/* MSI-X support */
int msix_enabled;
int intx_enabled;
- struct msix_entry *msix_entries;
cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
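The virtio-pci hunks above drop the driver-managed struct msix_entry array in favour of pci_alloc_irq_vectors() and pci_irq_vector(), letting the PCI core own the vector bookkeeping. A hedged sketch of that allocation pattern in isolation; the driver name and handler are hypothetical:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_msix(struct pci_dev *pdev, int nvectors,
			      irq_handler_t handler, void *priv)
{
	int i, err;

	/* Replaces kmalloc'ing msix_entry[] plus pci_enable_msix_exact(). */
	err = pci_alloc_irq_vectors(pdev, nvectors, nvectors, PCI_IRQ_MSIX);
	if (err < 0)
		return err;

	for (i = 0; i < nvectors; i++) {
		/* pci_irq_vector() maps a vector index to its Linux IRQ. */
		err = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "example-msix", priv);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), priv);
	pci_free_irq_vectors(pdev);
	return err;
}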
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index e76bd91a29da..4bf7ab375894 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -33,12 +33,12 @@ static inline u8 vp_ioread8(u8 __iomem *addr)
{
return ioread8(addr);
}
-static inline u16 vp_ioread16 (u16 __iomem *addr)
+static inline u16 vp_ioread16 (__le16 __iomem *addr)
{
return ioread16(addr);
}
-static inline u32 vp_ioread32(u32 __iomem *addr)
+static inline u32 vp_ioread32(__le32 __iomem *addr)
{
return ioread32(addr);
}
@@ -48,12 +48,12 @@ static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
iowrite8(value, addr);
}
-static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
iowrite16(value, addr);
}
-static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
iowrite32(value, addr);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 489bfc61cf30..409aeaa49246 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -420,7 +420,7 @@ unmap_release:
if (i == err_idx)
break;
vring_unmap_one(vq, &desc[i]);
- i = vq->vring.desc[i].next;
+ i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
}
vq->vq.num_free += total_sg;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
unsigned int i, j;
- u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+ __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
vq->desc_state[head].data = NULL;
@@ -649,7 +649,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
- * If the driver wrote data into the buffer, @len will be set to the
+ * If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short
* writes.
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 6b5ee896af63..7cc51223db1c 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3eb58cb51e56..acb00b53a520 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -72,16 +72,16 @@ config SOFT_WATCHDOG
module will be called softdog.
config DA9052_WATCHDOG
- tristate "Dialog DA9052 Watchdog"
- depends on PMIC_DA9052
- select WATCHDOG_CORE
- help
- Support for the watchdog in the DA9052 PMIC. Watchdog trigger
- cause system reset.
+ tristate "Dialog DA9052 Watchdog"
+ depends on PMIC_DA9052
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+ causes a system reset.
- Say Y here to include support for the DA9052 watchdog.
- Alternatively say M to compile the driver as a module,
- which will be called da9052_wdt.
+ Say Y here to include support for the DA9052 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called da9052_wdt.
config DA9055_WATCHDOG
tristate "Dialog Semiconductor DA9055 Watchdog"
@@ -104,11 +104,11 @@ config DA9063_WATCHDOG
This driver can be built as a module. The module name is da9063_wdt.
config DA9062_WATCHDOG
- tristate "Dialog DA9062 Watchdog"
+ tristate "Dialog DA9062/61 Watchdog"
depends on MFD_DA9062
select WATCHDOG_CORE
help
- Support for the watchdog in the DA9062 PMIC.
+ Support for the watchdog in the DA9062 and DA9061 PMICs.
This driver can be built as a module. The module name is da9062_wdt.
@@ -1008,8 +1008,8 @@ config IT87_WDT
tristate "IT87 Watchdog Timer"
depends on X86
---help---
- This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728
+ This is the driver for the hardware watchdog on the ITE IT8620,
+ IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728
Super I/O chips.
If the driver does not work, then make sure that the game port in
@@ -1514,6 +1514,13 @@ config LANTIQ_WDT
help
Hardware driver for the Lantiq SoC Watchdog Timer.
+config LOONGSON1_WDT
+ tristate "Loongson1 SoC hardware watchdog"
+ depends on MACH_LOONGSON32
+ select WATCHDOG_CORE
+ help
+ Hardware driver for the Loongson1 SoC Watchdog Timer.
+
config RALINK_WDT
tristate "Ralink SoC watchdog"
select WATCHDOG_CORE
@@ -1624,16 +1631,16 @@ config BOOKE_WDT_DEFAULT_TIMEOUT
The value can be overridden by the wdt_period command-line parameter.
config MEN_A21_WDT
- tristate "MEN A21 VME CPU Carrier Board Watchdog Timer"
- select WATCHDOG_CORE
- depends on GPIOLIB || COMPILE_TEST
- help
- Watchdog driver for MEN A21 VMEbus CPU Carrier Boards.
+ tristate "MEN A21 VME CPU Carrier Board Watchdog Timer"
+ select WATCHDOG_CORE
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Watchdog driver for MEN A21 VMEbus CPU Carrier Boards.
- The driver can also be built as a module. If so, the module will be
- called mena21_wdt.
+ The driver can also be built as a module. If so, the module will be
+ called mena21_wdt.
- If unsure select N here.
+ If unsure select N here.
# PPC64 Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index caa9f4aa492a..0c3d35e3c334 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -163,6 +163,7 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
+obj-$(CONFIG_LOONGSON1_WDT) += loongson1_wdt.o
obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 4dddd8298a22..c32c45bd8b09 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -55,6 +55,15 @@ struct bcm2835_wdt {
static unsigned int heartbeat;
static bool nowayout = WATCHDOG_NOWAYOUT;
+static bool bcm2835_wdt_is_running(struct bcm2835_wdt *wdt)
+{
+ uint32_t cur;
+
+ cur = readl(wdt->base + PM_RSTC);
+
+ return !!(cur & PM_RSTC_WRCFG_FULL_RESET);
+}
+
static int bcm2835_wdt_start(struct watchdog_device *wdog)
{
struct bcm2835_wdt *wdt = watchdog_get_drvdata(wdog);
@@ -181,6 +190,17 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
watchdog_set_nowayout(&bcm2835_wdt_wdd, nowayout);
bcm2835_wdt_wdd.parent = &pdev->dev;
+ if (bcm2835_wdt_is_running(wdt)) {
+ /*
+ * The currently active timeout value (set by the
+ * bootloader) may be different from the module
+ * heartbeat parameter or the value in device
+ * tree. But we just need to set WDOG_HW_RUNNING,
+ * because then the framework will "immediately" ping
+ * the device, updating the timeout.
+ */
+ set_bit(WDOG_HW_RUNNING, &bcm2835_wdt_wdd.status);
+ }
err = watchdog_register_device(&bcm2835_wdt_wdd);
if (err) {
dev_err(dev, "Failed to register watchdog device");
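The bcm2835 probe change detects a watchdog the bootloader already started and flags it before registration; with WDOG_HW_RUNNING set, the watchdog core keeps pinging the hardware until userspace takes over, instead of letting the bootloader's timeout expire unnoticed. A minimal sketch of the general pattern; the helper name is illustrative:

#include <linux/watchdog.h>

static int example_register_possibly_running(struct watchdog_device *wdd,
					      bool hw_already_running)
{
	/* Tell the core the hardware is ticking so it pings the device
	 * until userspace opens it; the first ping also reprograms the
	 * timeout chosen by module parameter or device tree. */
	if (hw_already_running)
		set_bit(WDOG_HW_RUNNING, &wdd->status);

	return watchdog_register_device(wdd);
}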
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index e238df4d75a2..4814c00b32f6 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -216,6 +216,7 @@ static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm7038-wdt" },
{},
};
+MODULE_DEVICE_TABLE(of, bcm7038_wdt_match);
static struct platform_driver bcm7038_wdt_driver = {
.probe = bcm7038_wdt_probe,
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 71ee07950e63..3d43775548e5 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -538,12 +538,9 @@ static int cpwd_probe(struct platform_device *op)
if (cpwd_device)
return -EINVAL;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- err = -ENOMEM;
- if (!p) {
- pr_err("Unable to allocate struct cpwd\n");
- goto out;
- }
+ p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
p->irq = op->archdata.irqs[0];
@@ -553,12 +550,12 @@ static int cpwd_probe(struct platform_device *op)
4 * WD_TIMER_REGSZ, DRIVER_NAME);
if (!p->regs) {
pr_err("Unable to map registers\n");
- goto out_free;
+ return -ENOMEM;
}
options = of_find_node_by_path("/options");
- err = -ENODEV;
if (!options) {
+ err = -ENODEV;
pr_err("Unable to find /options node\n");
goto out_iounmap;
}
@@ -620,10 +617,7 @@ static int cpwd_probe(struct platform_device *op)
platform_set_drvdata(op, p);
cpwd_device = p;
- err = 0;
-
-out:
- return err;
+ return 0;
out_unregister:
for (i--; i >= 0; i--)
@@ -632,9 +626,7 @@ out_unregister:
out_iounmap:
of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
-out_free:
- kfree(p);
- goto out;
+ return err;
}
static int cpwd_remove(struct platform_device *op)
@@ -659,7 +651,6 @@ static int cpwd_remove(struct platform_device *op)
free_irq(p->irq, p);
of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
- kfree(p);
cpwd_device = NULL;
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 7386111220d5..a02cee6820a1 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -1,5 +1,5 @@
/*
- * da9062_wdt.c - WDT device driver for DA9062
+ * Watchdog device driver for DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -188,6 +188,13 @@ static const struct watchdog_ops da9062_watchdog_ops = {
.set_timeout = da9062_wdt_set_timeout,
};
+static const struct of_device_id da9062_compatible_id_table[] = {
+ { .compatible = "dlg,da9062-watchdog", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
+
static int da9062_wdt_probe(struct platform_device *pdev)
{
int ret;
@@ -244,11 +251,12 @@ static struct platform_driver da9062_wdt_driver = {
.remove = da9062_wdt_remove,
.driver = {
.name = "da9062-watchdog",
+ .of_match_table = da9062_compatible_id_table,
},
};
module_platform_driver(da9062_wdt_driver);
MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("WDT device driver for Dialog DA9062");
+MODULE_DESCRIPTION("WDT device driver for Dialog DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9062-watchdog");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 17454ca653f4..0e731d797a2a 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -166,8 +166,12 @@ static int davinci_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
davinci_wdt->clk = devm_clk_get(dev, NULL);
- if (WARN_ON(IS_ERR(davinci_wdt->clk)))
+
+ if (IS_ERR(davinci_wdt->clk)) {
+ if (PTR_ERR(davinci_wdt->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get clock node\n");
return PTR_ERR(davinci_wdt->clk);
+ }
clk_prepare_enable(davinci_wdt->clk);
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index db36d12e2b52..a4b729259b12 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -43,6 +43,7 @@ static inline int wdt_command(int sub, u32 *in, int inlen)
static int wdt_start(struct watchdog_device *wd)
{
+ struct device *dev = watchdog_get_drvdata(wd);
int ret, in_size;
int timeout = wd->timeout;
struct ipc_wd_start {
@@ -57,36 +58,32 @@ static int wdt_start(struct watchdog_device *wd)
in_size = DIV_ROUND_UP(sizeof(ipc_wd_start), 4);
ret = wdt_command(SCU_WATCHDOG_START, (u32 *)&ipc_wd_start, in_size);
- if (ret) {
- struct device *dev = watchdog_get_drvdata(wd);
+ if (ret)
dev_crit(dev, "error starting watchdog: %d\n", ret);
- }
return ret;
}
static int wdt_ping(struct watchdog_device *wd)
{
+ struct device *dev = watchdog_get_drvdata(wd);
int ret;
ret = wdt_command(SCU_WATCHDOG_KEEPALIVE, NULL, 0);
- if (ret) {
- struct device *dev = watchdog_get_drvdata(wd);
- dev_crit(dev, "Error executing keepalive: 0x%x\n", ret);
- }
+ if (ret)
+ dev_crit(dev, "Error executing keepalive: %d\n", ret);
return ret;
}
static int wdt_stop(struct watchdog_device *wd)
{
+ struct device *dev = watchdog_get_drvdata(wd);
int ret;
ret = wdt_command(SCU_WATCHDOG_STOP, NULL, 0);
- if (ret) {
- struct device *dev = watchdog_get_drvdata(wd);
- dev_crit(dev, "Error stopping watchdog: 0x%x\n", ret);
- }
+ if (ret)
+ dev_crit(dev, "Error stopping watchdog: %d\n", ret);
return ret;
}
@@ -151,6 +148,9 @@ static int mid_wdt_probe(struct platform_device *pdev)
return ret;
}
+ /* Make sure the watchdog is not running */
+ wdt_stop(wdt_dev);
+
ret = watchdog_register_device(wdt_dev);
if (ret) {
dev_err(&pdev->dev, "error registering watchdog device\n");
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index e54839b12650..b9878c41598f 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
+ * IT8620, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
* IT8728 and IT8783.
*
* This program is free software; you can redistribute it and/or
@@ -78,6 +78,7 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
+#define IT8620_ID 0x8620
#define IT8702_ID 0x8702
#define IT8705_ID 0x8705
#define IT8712_ID 0x8712
@@ -630,6 +631,7 @@ static int __init it87_wdt_init(void)
case IT8726_ID:
max_units = 65535;
break;
+ case IT8620_ID:
case IT8718_ID:
case IT8720_ID:
case IT8721_ID:
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index c8d51ddb26d5..20627f22baf6 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -148,7 +148,7 @@ static const struct of_device_id jz4740_wdt_of_matches[] = {
{ .compatible = "ingenic,jz4740-watchdog", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches)
+MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches);
#endif
static int jz4740_wdt_probe(struct platform_device *pdev)
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
new file mode 100644
index 000000000000..3aee50c64a36
--- /dev/null
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <loongson1.h>
+
+#define DEFAULT_HEARTBEAT 30
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+
+static unsigned int heartbeat;
+module_param(heartbeat, uint, 0444);
+
+struct ls1x_wdt_drvdata {
+ void __iomem *base;
+ struct clk *clk;
+ unsigned long clk_rate;
+ struct watchdog_device wdt;
+};
+
+static int ls1x_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+
+ writel(0x1, drvdata->base + WDT_SET);
+
+ return 0;
+}
+
+static int ls1x_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+ unsigned int max_hw_heartbeat = wdt_dev->max_hw_heartbeat_ms / 1000;
+ unsigned int counts;
+
+ wdt_dev->timeout = timeout;
+
+ counts = drvdata->clk_rate * min(timeout, max_hw_heartbeat);
+ writel(counts, drvdata->base + WDT_TIMER);
+
+ return 0;
+}
+
+static int ls1x_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+
+ writel(0x1, drvdata->base + WDT_EN);
+
+ return 0;
+}
+
+static int ls1x_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct ls1x_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
+
+ writel(0x0, drvdata->base + WDT_EN);
+
+ return 0;
+}
+
+static const struct watchdog_info ls1x_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Loongson1 Watchdog",
+};
+
+static const struct watchdog_ops ls1x_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ls1x_wdt_start,
+ .stop = ls1x_wdt_stop,
+ .ping = ls1x_wdt_ping,
+ .set_timeout = ls1x_wdt_set_timeout,
+};
+
+static int ls1x_wdt_probe(struct platform_device *pdev)
+{
+ struct ls1x_wdt_drvdata *drvdata;
+ struct watchdog_device *ls1x_wdt;
+ unsigned long clk_rate;
+ struct resource *res;
+ int err;
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->clk = devm_clk_get(&pdev->dev, pdev->name);
+ if (IS_ERR(drvdata->clk))
+ return PTR_ERR(drvdata->clk);
+
+ err = clk_prepare_enable(drvdata->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return err;
+ }
+
+ clk_rate = clk_get_rate(drvdata->clk);
+ if (!clk_rate) {
+ err = -EINVAL;
+ goto err0;
+ }
+ drvdata->clk_rate = clk_rate;
+
+ ls1x_wdt = &drvdata->wdt;
+ ls1x_wdt->info = &ls1x_wdt_info;
+ ls1x_wdt->ops = &ls1x_wdt_ops;
+ ls1x_wdt->timeout = DEFAULT_HEARTBEAT;
+ ls1x_wdt->min_timeout = 1;
+ ls1x_wdt->max_hw_heartbeat_ms = U32_MAX / clk_rate * 1000;
+ ls1x_wdt->parent = &pdev->dev;
+
+ watchdog_init_timeout(ls1x_wdt, heartbeat, &pdev->dev);
+ watchdog_set_nowayout(ls1x_wdt, nowayout);
+ watchdog_set_drvdata(ls1x_wdt, drvdata);
+
+ err = watchdog_register_device(&drvdata->wdt);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register watchdog device\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ dev_info(&pdev->dev, "Loongson1 Watchdog driver registered\n");
+
+ return 0;
+err0:
+ clk_disable_unprepare(drvdata->clk);
+ return err;
+}
+
+static int ls1x_wdt_remove(struct platform_device *pdev)
+{
+ struct ls1x_wdt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&drvdata->wdt);
+ clk_disable_unprepare(drvdata->clk);
+
+ return 0;
+}
+
+static struct platform_driver ls1x_wdt_driver = {
+ .probe = ls1x_wdt_probe,
+ .remove = ls1x_wdt_remove,
+ .driver = {
+ .name = "ls1x-wdt",
+ },
+};
+
+module_platform_driver(ls1x_wdt_driver);
+
+MODULE_AUTHOR("Yang Ling <gnaygnil@gmail.com>");
+MODULE_DESCRIPTION("Loongson1 Watchdog Driver");
+MODULE_LICENSE("GPL");
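The new ls1x driver converts a timeout in seconds into ticks of the watchdog clock and clamps it to what the 32-bit WDT_TIMER register can hold; anything longer is handled by the core via max_hw_heartbeat_ms. A back-of-the-envelope sketch of that conversion, assuming (hypothetically) a 33 MHz input clock:

#include <linux/kernel.h>	/* min(), U32_MAX */
#include <linux/types.h>

static u32 ls1x_timeout_to_counts(unsigned long clk_rate,
				  unsigned int timeout_s)
{
	/* Largest whole-second timeout the 32-bit counter can hold;
	 * at 33 MHz this is U32_MAX / 33000000, roughly 130 seconds. */
	unsigned int max_hw_s = U32_MAX / clk_rate;

	return clk_rate * min(timeout_s, max_hw_s);
}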
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 48b84df2afda..68c41fa2be27 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -205,6 +205,7 @@ static struct platform_device_id max77620_wdt_devtype[] = {
{ .name = "max77620-watchdog", },
{ },
};
+MODULE_DEVICE_TABLE(platform, max77620_wdt_devtype);
static struct platform_driver max77620_wdt_driver = {
.driver = {
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 79b35515904e..b29c6fde7473 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -389,6 +389,8 @@ static int mei_wdt_register(struct mei_wdt *wdt)
wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
watchdog_set_drvdata(&wdt->wdd, wdt);
+ watchdog_stop_on_reboot(&wdt->wdd);
+
ret = watchdog_register_device(&wdt->wdd);
if (ret) {
dev_err(dev, "unable to register watchdog device = %d.\n", ret);
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 44d180a2c5e5..45d47664a00a 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -264,7 +264,6 @@ static struct platform_driver meson_gxbb_wdt_driver = {
module_platform_driver(meson_gxbb_wdt_driver);
-MODULE_ALIAS("platform:meson-gxbb-wdt");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_DESCRIPTION("Amlogic Meson GXBB Watchdog timer driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 5f2273aac37d..366e5c7e650b 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/module.h>
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 529182d7d8a7..b5cdceb36cff 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -56,7 +56,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/watchdog.h>
#include <linux/cpumask.h>
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 5796b5d1b3f2..4f47b5e90956 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -209,7 +209,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.parent = &pdev->dev;
wdt->layout = regs;
- if (readl(wdt->base + WDT_STS) & 1)
+ if (readl(wdt_addr(wdt, WDT_STS)) & 1)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
/*
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index e1d39a1e9628..8965e3f536c3 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/clk.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -155,12 +156,27 @@ static struct miscdevice sa1100dog_miscdev = {
};
static int margin __initdata = 60; /* (secs) Default is 1 minute */
+static struct clk *clk;
static int __init sa1100dog_init(void)
{
int ret;
- oscr_freq = get_clock_tick_rate();
+ clk = clk_get(NULL, "OSTIMER0");
+ if (IS_ERR(clk)) {
+ pr_err("SA1100/PXA2xx Watchdog Timer: clock not found: %d\n",
+ (int) PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("SA1100/PXA2xx Watchdog Timer: clock failed to prepare+enable: %d\n",
+ ret);
+ goto err;
+ }
+
+ oscr_freq = clk_get_rate(clk);
/*
* Read the reset status, and save it for later. If
@@ -176,11 +192,17 @@ static int __init sa1100dog_init(void)
pr_info("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
margin);
return ret;
+err:
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ return ret;
}
static void __exit sa1100dog_exit(void)
{
misc_deregister(&sa1100dog_miscdev);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
}
module_init(sa1100dog_init);
diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
index 778acf80aacb..85dd20e05726 100644
--- a/drivers/xen/arm-device.c
+++ b/drivers/xen/arm-device.c
@@ -58,9 +58,13 @@ static int xen_map_device_mmio(const struct resource *resources,
xen_pfn_t *gpfns;
xen_ulong_t *idxs;
int *errs;
- struct xen_add_to_physmap_range xatp;
for (i = 0; i < count; i++) {
+ struct xen_add_to_physmap_range xatp = {
+ .domid = DOMID_SELF,
+ .space = XENMAPSPACE_dev_mmio
+ };
+
r = &resources[i];
nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
@@ -87,9 +91,7 @@ static int xen_map_device_mmio(const struct resource *resources,
idxs[j] = XEN_PFN_DOWN(r->start) + j;
}
- xatp.domid = DOMID_SELF;
xatp.size = nr;
- xatp.space = XENMAPSPACE_dev_mmio;
set_xen_guest_handle(xatp.gpfns, gpfns);
set_xen_guest_handle(xatp.idxs, idxs);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 7ef27c6ed72f..3c41470c7fc4 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -369,8 +369,7 @@ static void evtchn_fifo_resume(void)
}
ret = init_control_block(cpu, control_block);
- if (ret < 0)
- BUG();
+ BUG_ON(ret < 0);
}
/*
@@ -445,7 +444,7 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
- "CPUHP_XEN_EVTCHN_PREPARE",
+ "xen/evtchn:prepare",
xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
out:
put_cpu();
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index e8c7f09d01be..6890897a6f30 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -125,7 +125,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
while (*new) {
struct user_evtchn *this;
- this = container_of(*new, struct user_evtchn, node);
+ this = rb_entry(*new, struct user_evtchn, node);
parent = *new;
if (this->port < evtchn->port)
@@ -157,7 +157,7 @@ static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
while (node) {
struct user_evtchn *evtchn;
- evtchn = container_of(node, struct user_evtchn, node);
+ evtchn = rb_entry(node, struct user_evtchn, node);
if (evtchn->port < port)
node = node->rb_left;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
+static uint64_t callback_via;
static unsigned long alloc_xen_mmio(unsigned long len)
{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
return addr;
}
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+ u8 pin;
+ int irq;
+
+ irq = pdev->irq;
+ if (irq < 16)
+ return irq; /* ISA IRQ */
+
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+ return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+ ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+ IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+ int err;
+ if (!xen_pv_domain())
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+ dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ return err;
+ }
+ return 0;
+}
+
static int platform_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ /*
+ * Xen HVM guests always use the vector callback mechanism.
+ * L1 Dom0 in a nested Xen environment is a PV guest inside an
+ * HVM environment. It needs the platform-pci driver to get
+ * notifications from L0 Xen, but it cannot use the vector callback
+ * as it is not exported by L1 Xen.
+ */
+ if (xen_pv_domain()) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+ goto out;
+ }
+ callback_via = get_callback_via(pdev);
+ ret = xen_set_callback_via(callback_via);
+ if (ret) {
+ dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+ "err=%d\n", ret);
+ goto out;
+ }
+ }
+
max_nr_gframes = gnttab_max_grant_frames();
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+ .resume_early = platform_pci_resume,
+#endif
};
builtin_pci_driver(platform_driver);
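get_callback_via() above packs the interrupt routing into the 64-bit value Xen expects for the callback parameter: a via type in the top byte and, for the PCI INTx case, segment, bus, devfn and pin below it. A sketch of that encoding with the shifts from the hunk spelled out; the helper name and values are illustrative:

#include <linux/types.h>

static u64 encode_pci_intx_callback(u16 segment, u8 bus, u8 devfn, u8 pin)
{
	return ((u64)0x01 << 56) |	/* via type 1: PCI INTx (HVM_CALLBACK_VIA_TYPE_SHIFT) */
	       ((u64)segment << 32) |	/* PCI domain/segment */
	       ((u64)bus << 16) |	/* bus number */
	       ((u64)devfn << 8) |	/* device/function */
	       ((u64)(pin - 1) & 3);	/* INTA#..INTD# as 0..3 */
}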
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 702040fe2001..6e3306f4a525 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -602,7 +602,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
vma, vma->vm_start, vma->vm_end,
- vmf->pgoff, vmf->virtual_address);
+ vmf->pgoff, (void *)vmf->address);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 478fb91e3df2..f905d6eeb048 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -275,6 +275,10 @@ retry:
rc = 0;
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+ if (!rc)
+ swiotlb_set_max_segment(PAGE_SIZE);
+
return rc;
error:
if (repeat--) {
@@ -392,7 +396,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
- !swiotlb_force) {
+ (swiotlb_force != SWIOTLB_FORCE)) {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
@@ -552,7 +556,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = xen_phys_to_bus(paddr);
- if (swiotlb_force ||
+ if (swiotlb_force == SWIOTLB_FORCE ||
xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
!dma_capable(hwdev, dev_addr, sg->length) ||
range_straddles_page_boundary(paddr, sg->length)) {
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index e74f9c1fbd80..867a2e425208 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -42,7 +42,6 @@ int xb_write(const void *data, unsigned len);
int xb_read(void *data, unsigned len);
int xb_data_to_read(void);
int xb_wait_for_data_to_read(void);
-int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
extern enum xenstore_init xen_store_domain_type;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 6c0ead4be784..79130b310247 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -302,6 +302,29 @@ static void watch_fired(struct xenbus_watch *watch,
mutex_unlock(&adap->dev_data->reply_mutex);
}
+static int xenbus_command_reply(struct xenbus_file_priv *u,
+ unsigned int msg_type, const char *reply)
+{
+ struct {
+ struct xsd_sockmsg hdr;
+ const char body[16];
+ } msg;
+ int rc;
+
+ msg.hdr = u->u.msg;
+ msg.hdr.type = msg_type;
+ msg.hdr.len = strlen(reply) + 1;
+ if (msg.hdr.len > sizeof(msg.body))
+ return -E2BIG;
+
+ mutex_lock(&u->reply_mutex);
+ rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+ wake_up(&u->read_waitq);
+ mutex_unlock(&u->reply_mutex);
+
+ return rc;
+}
+
static int xenbus_write_transaction(unsigned msg_type,
struct xenbus_file_priv *u)
{
@@ -316,12 +339,12 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
- } else if (msg_type == XS_TRANSACTION_END) {
+ } else if (u->u.msg.tx_id != 0) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
if (&trans->list == &u->transactions)
- return -ESRCH;
+ return xenbus_command_reply(u, XS_ERROR, "ENOENT");
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
@@ -372,12 +395,12 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
if (token == NULL) {
- rc = -EILSEQ;
+ rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
goto out;
}
token++;
if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
- rc = -EILSEQ;
+ rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
goto out;
}
@@ -411,23 +434,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
/* Success. Synthesize a reply to say all is OK. */
- {
- struct {
- struct xsd_sockmsg hdr;
- char body[3];
- } __packed reply = {
- {
- .type = msg_type,
- .len = sizeof(reply.body)
- },
- "OK"
- };
-
- mutex_lock(&u->reply_mutex);
- rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
- wake_up(&u->read_waitq);
- mutex_unlock(&u->reply_mutex);
- }
+ rc = xenbus_command_reply(u, msg_type, "OK");
out:
return rc;
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 6ac2579da0eb..05397305fccd 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>